code stringlengths 1 1.72M | language stringclasses 1 value |
|---|---|
# Packaging script (distutils) for the django-rest distribution.
from distutils.core import setup

# Derive a "major.minor" version string from the package's VERSION tuple,
# e.g. (0, 1, ...) -> "0.1".
version = '%s.%s' % __import__('django_restapi').VERSION[:2]

setup(name='django-rest',
      version=version,
      packages=['django_restapi'],
      author='Andriy Drozdyuk',
      author_email='drozzy@gmail.com',
      )
| Python |
# Django settings for running the django_restapi test project.
from os.path import realpath

# Development-only flags; never enable DEBUG in production.
DEBUG = True
TEMPLATE_DEBUG = True

INSTALLED_APPS = (
    'django.contrib.contenttypes',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.sessions',
    'django.contrib.sites',
    # The two example apps exercised by the test suite.
    'django_restapi_tests.polls',
    'django_restapi_tests.people'
)

SITE_ID = 1
ROOT_URLCONF = 'django_restapi_tests.urls'

# SQLite database stored in the 'testdata' file (old-style single-database
# settings, pre-Django 1.2).
DATABASE_NAME = realpath('testdata')
DATABASE_ENGINE = 'sqlite3'
TEMPLATE_DIRS = 'templates'

MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    # 'django.middleware.common.CommonMiddleware',
    # 'django.middleware.doc.XViewMiddleware',
)
| Python |
#!/usr/bin/env python
# Standard pre-1.4 Django management entry point: locate settings.py next
# to this script and hand control to django.core.management.
from django.core.management import execute_manager
try:
    import settings # Assumed to be in the same directory.
except ImportError:
    import sys
    sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n(If the file settings.py does indeed exist, it's causing an ImportError somehow.)\n" % __file__)
    sys.exit(1)

if __name__ == "__main__":
    execute_manager(settings)
| Python |
from datetime import datetime
from django.db import models
from django.utils.translation import gettext_lazy as _
class Poll(models.Model):
    """A poll question.

    The ``password`` field must never be exposed through the REST API
    (the test suite asserts that 'secret' never leaks into responses).
    """
    question = models.CharField(max_length=200)
    password = models.CharField(max_length=200)
    pub_date = models.DateTimeField(_('date published'), default=datetime.now)

    class Admin:
        # Register with the old-style Django admin using default options.
        pass

    def __str__(self):
        return self.question

    def get_choice_list(self):
        """Return this poll's choices as a list, ordered by primary key."""
        return list(self.choice_set.order_by('id'))

    def get_choice_from_num(self, choice_num):
        """Return the ``choice_num``-th choice of this poll (1-based).

        Raises Choice.DoesNotExist for out-of-range numbers.  Values < 1
        are rejected explicitly: previously 0 or negative numbers silently
        selected a choice from the *end* of the list via Python's negative
        indexing.
        """
        num = int(choice_num)
        if num < 1:
            raise Choice.DoesNotExist
        try:
            return self.get_choice_list()[num - 1]
        except IndexError:
            raise Choice.DoesNotExist
class Choice(models.Model):
    """One selectable answer belonging to a Poll."""
    poll = models.ForeignKey(Poll)
    choice = models.CharField(max_length=200)
    votes = models.IntegerField()

    class Admin:
        pass

    def __str__(self):
        return self.choice

    def get_num(self):
        """Return this choice's 1-based position within its poll's list.

        Raises Choice.DoesNotExist when the choice cannot be found there.
        """
        siblings = self.poll.get_choice_list()
        try:
            position = siblings.index(self)
        except ValueError:
            raise Choice.DoesNotExist
        return position + 1
from binascii import b2a_base64
from datetime import datetime
from django.core import serializers
from django.test import TestCase
from django.utils.functional import curry
from django_restapi.authentication import HttpDigestAuthentication
from django_restapi_tests.examples.authentication import digest_authfunc
from django_restapi_tests.polls.models import Poll
import webbrowser, re
DIGEST_AUTH = 'Digest username="%(username)s", realm="%(realm)s", nonce="%(nonce)s", uri="%(fullpath)s", algorithm=MD5, response="%(response)s", qop=%(qop)s, nc=%(nc)s, cnonce="%(cnonce)s"'
SHOW_ERRORS_IN_BROWSER = False
def show_in_browser(content):
    """Debugging aid: dump an error response body to /tmp and open it in a
    web browser, but only when SHOW_ERRORS_IN_BROWSER is switched on."""
    if SHOW_ERRORS_IN_BROWSER:
        f = open("/tmp/djangorest_error", "w")
        try:
            f.write(content)
        finally:
            # Guarantee the handle is closed (and the data flushed) before
            # the browser reads the file; the original leaked it on error.
            f.close()
        webbrowser.open_new("file:///tmp/djangorest_error")
class BasicTest(TestCase):
    """Exercises CRUD behaviour of the XML/HTML/JSON poll and choice
    resources, the custom nested URL scheme, and full-payload submission."""
    fixtures = ['initial_data.json']

    def setUp(self):
        # The old Django test client has no put()/delete() helpers; emulate
        # them by overriding REQUEST_METHOD on post()/get().
        self.client.put = curry(self.client.post, REQUEST_METHOD='PUT')
        self.client.delete = curry(self.client.get, REQUEST_METHOD='DELETE')

    def test_basics(self):
        """List/create/update/read/delete polls and choices in xml and html."""
        for format in ['xml', 'html']:
            # Get list of polls; the password field must never be exposed.
            url = '/%s/polls/' % format
            response = self.client.get(url)
            self.assertEqual(response.status_code, 200)
            self.assertEqual(response.content.find('secret'), -1)
            # Get list of choices
            url = '/%s/choices/' % format
            response = self.client.get(url)
            self.assertEqual(response.status_code, 200)
            # Second page of choices must exist.
            response = self.client.get(url, {'page' : 2})
            self.assertEqual(response.status_code, 200)
            # Third page must not exist.
            response = self.client.get(url, {'page' : 3})
            self.assertEqual(response.status_code, 404)
            # Try to create poll with insufficient data (needs to fail)
            url = '/%s/polls/' % format
            params = {
                'question' : 'Does this not work?',
            }
            response = self.client.post(url, params)
            self.assertEqual(response.status_code, 400)
            # Create poll
            params = {
                'question' : 'Does this work?',
                'password' : 'secret',
                'pub_date' : '2001-01-01'
            }
            response = self.client.post(url, params)
            self.assertEqual(response.status_code, 201)
            location = response['Location']
            poll_id = int(re.findall(r"\d+", location)[0])
            # Try to change poll with inappropriate data (needs to fail)
            url = '/%s/polls/%d/' % (format, poll_id)
            params = {
                'question' : 'Yes, it works.',
                'password' : 'newsecret',
                'pub_date' : '2007-07-07-123'
            }
            response = self.client.put(url, params)
            self.assertEqual(response.status_code, 400)
            # Change poll
            url = '/%s/polls/%d/' % (format, poll_id)
            params = {
                'question' : 'Yes, it works.',
                'password' : 'newsecret',
                'pub_date' : '2007-07-07'
            }
            response = self.client.put(url, params)
            self.assertEqual(response.status_code, 200)
            # Read poll
            response = self.client.get(url)
            self.assertEqual(response.status_code, 200)
            self.assertEqual(response.content.find('secret'), -1)
            # Delete poll
            response = self.client.delete(url)
            self.assertEqual(response.status_code, 200)
            # Read choice
            url = '/%s/choices/1/' % format
            response = self.client.get(url)
            self.assertEqual(response.status_code, 200)
            # Deleting a choice is not a permitted method (must fail)
            response = self.client.delete(url)
            self.assertEqual(response.status_code, 405)

    def test_urlpatterns(self):
        """CRUD through the nested URL scheme /json/polls/<id>/choices/..."""
        url = '/json/polls/'
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content.find('secret'), -1)
        # Get poll
        url = '/json/polls/1/'
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content.find('secret'), -1)
        # Get filtered list of choices
        url = '/json/polls/1/choices/'
        response = self.client.get(url)
        # NOTE(security): eval() of the response body is acceptable only
        # because this is trusted fixture data; prefer a JSON parser.
        self.assertEqual(len(eval(response.content)), 3)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content.find('secret'), -1)
        # Get choice
        url = '/json/polls/1/choices/1/'
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content.find('secret'), -1)
        # Get choice (failure)
        url = '/json/polls/1/choices/12/'
        response = self.client.get(url)
        self.assertEqual(response.status_code, 404)
        self.assertEqual(response.content.find('secret'), -1)
        # Try to create poll with insufficient data
        # (needs to fail)
        url = '/json/polls/'
        params = {
            'question' : 'Does this not work?',
        }
        response = self.client.post(url, params)
        self.assertEqual(response.status_code, 400)
        # Create choice
        url = '/json/polls/1/choices/'
        params = {
            'poll' : 1, # TODO: Should be taken from URL
            'choice' : 'New choice',
            'votes' : 0
        }
        response = self.client.post(url, params)
        self.assertEqual(response.status_code, 201)
        location = response['location']
        poll_id = int(re.findall(r"\d+", location)[0])
        self.assertEqual(poll_id, 1)
        # Try to update choice with insufficient data (needs to fail).
        # Strip the protocol head and base url from the Location header:
        # the test client only works with paths.
        url = location[17:]
        params = {
            'poll' : poll_id,
            'choice' : 'New choice',
            'votes' : 'Should be an integer'
        }
        response = self.client.put(url, params)
        self.assertEqual(response.status_code, 400)
        # Update choice
        params = {
            'poll' : poll_id,
            'choice' : 'New choice',
            'votes' : '712'
        }
        response = self.client.put(url, params)
        self.assertNotEqual(response.content.find("712"), -1)
        self.assertEqual(response.status_code, 200)
        # Delete choice
        response = self.client.delete(url)
        self.assertEqual(response.status_code, 200)

    def test_submission(self):
        """POST/PUT fully serialized payloads to fullxml/fulljson resources."""
        # XML
        url = '/fullxml/polls/'
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
        # Create
        new_poll = Poll(
            question = 'Does XML submission work?',
            password = 'secret',
            pub_date = datetime.now()
        )
        serialized_poll = serializers.serialize('xml', [new_poll])
        serialized_poll = serialized_poll.replace('pk="None"', 'pk="1"') # Is ignored, but needs to be an integer
        response = self.client.post(url, data=serialized_poll, content_type='application/xml')
        self.assertEqual(response.status_code, 201)
        # Normalize the server-assigned pk before comparing round-trips.
        response_content = re.sub(r'pk="\d+"', 'pk="1"', response.content)
        self.assertEqual(serialized_poll, response_content)
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
        self.assertNotEqual(response_content.find("XML submission"), -1)
        # Update
        url = '/fullxml/polls/1/'
        updated_poll = Poll(
            question = 'New question',
            password = 'new_secret',
            pub_date = datetime.now()
        )
        serialized_poll = serializers.serialize('xml', [updated_poll])
        serialized_poll = serialized_poll.replace('pk="None"', 'pk="1"') # Is ignored, but needs to be an integer
        response = self.client.put(url, data=serialized_poll, content_type='application/xml')
        updated_poll = Poll.objects.get(id=1)
        self.assertEqual(updated_poll.question, "New question")
        self.assertEqual(updated_poll.password, "new_secret")
        # JSON
        url = '/fulljson/polls/'
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
        # Create
        new_poll = Poll(
            question = 'Does JSON submission work?',
            password = 'secret',
            pub_date = datetime.now()
        )
        serialized_poll = serializers.serialize('json', [new_poll])
        serialized_poll = serialized_poll.replace('"pk": null', '"pk": 1') # Is ignored, but needs to be an integer
        response = self.client.post(url, data=serialized_poll, content_type='application/json')
        self.assertEqual(response.status_code, 201)
        response_content = re.sub(r'"pk": \d+,', '"pk": 1,', response.content)
        self.assertEqual(serialized_poll, response_content)
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
        self.assertNotEqual(response_content.find("JSON submission"), -1)
        # Update
        url = '/fulljson/polls/2/'
        updated_poll = Poll(
            question = 'Another question',
            password = 'another_secret',
            pub_date = datetime.now()
        )
        serialized_poll = serializers.serialize('json', [updated_poll])
        # NOTE(review): the JSON serializer emits '"pk": null' (see the
        # create path above), so this replacement looks like a no-op; kept
        # verbatim because the server ignores the pk anyway.
        serialized_poll = serialized_poll.replace('"pk": "None"', '"pk": "1"') # Is ignored, but needs to be an integer
        response = self.client.put(url, data=serialized_poll, content_type='application/json')
        updated_poll = Poll.objects.get(id=2)
        self.assertEqual(updated_poll.question, "Another question")
        self.assertEqual(updated_poll.password, "another_secret")
class AuthenticationTest(TestCase):
    """Exercises HTTP Basic and HTTP Digest protection of poll resources."""
    fixtures = ['initial_data.json']

    def get_digest_test_params(self, response, url, auth_helper):
        """
        Extract authentication variables from server response
        e.g. {'nonce': '477be2a405a439cdba5227be89ba0f76', 'qop': 'auth', 'realm': 'realm1', 'opaque': '67d958f952de6bd4c1a88686f1b8a896'}
        and add missing params (method, path, username, cnonce, nc).
        """
        www_auth_response = response['WWW-Authenticate']
        self.assertEqual(www_auth_response[:7].lower(), 'digest ')
        auth_params = auth_helper.get_auth_dict(www_auth_response[7:])
        self.assertEqual(len(auth_params), 4)
        # 'opaque' plays no part in computing the client's digest response.
        auth_params.pop('opaque')
        auth_params.update({'http_method': 'GET', 'fullpath': url, 'username': 'john', 'cnonce': '12345678', 'nc': '00000001'})
        return auth_params

    def test_basic_authentication(self):
        # Basic authentication, no credentials -> 401
        url = '/basic/polls/'
        response = self.client.get(url)
        self.assertEqual(response.status_code, 401)
        # Basic authentication, wrong password -> 401.
        # b2a_base64 appends a trailing newline; [:-1] strips it.
        headers = {
            'HTTP_AUTHORIZATION': 'Basic %s' % b2a_base64('rest:somepass')[:-1]
        }
        response = self.client.get(url, **headers)
        self.assertEqual(response.status_code, 401)
        # Basic authentication, right password -> 200
        headers = {
            'HTTP_AUTHORIZATION': 'Basic %s' % b2a_base64('rest:rest')[:-1]
        }
        response = self.client.get(url, **headers)
        self.assertEqual(response.status_code, 200)

    def test_digest_authentication(self):
        # 1) Digest authentication, no credentials: 401 plus a challenge.
        url = '/digest/polls/'
        response = self.client.get(url)
        self.assertEqual(response.status_code, 401)
        self.assertEqual(response.has_header('WWW-Authenticate'), True)
        # Set up an auth class in order to avoid duplicate
        # authentication code.
        auth_helper = HttpDigestAuthentication(authfunc=digest_authfunc, realm='realm1')
        # 2) Digest authentication, wrong response (=wrong password) -> 401
        auth_params = self.get_digest_test_params(response, url, auth_helper)
        auth_params['response'] = 'wrongresponse'
        headers = {
            'SCRIPT_NAME' : '',
            'HTTP_AUTHORIZATION': DIGEST_AUTH % auth_params
        }
        response = self.client.get(url, **headers)
        self.assertEqual(response.status_code, 401)
        # 3) Digest authentication, correctly computed response -> 200
        auth_params = self.get_digest_test_params(response, url, auth_helper)
        response = auth_helper.get_auth_response(**auth_params)
        auth_params['response'] = response
        headers = {
            'SCRIPT_NAME' : '',
            'HTTP_AUTHORIZATION': DIGEST_AUTH % auth_params
        }
        response = self.client.get(url, **headers)
        self.assertEqual(response.status_code, 200)
| Python |
# Root URLconf: aggregates the URL patterns of every example app plus the
# admin.  Each example module ships fully-qualified patterns, so all are
# included at the empty prefix.
from django.conf.urls.defaults import *
from django.contrib import admin

urlpatterns = patterns('',
    url(r'', include('django_restapi_tests.examples.simple')),
    url(r'', include('django_restapi_tests.examples.basic')),
    url(r'', include('django_restapi_tests.examples.template')),
    url(r'', include('django_restapi_tests.examples.custom_urls')),
    url(r'', include('django_restapi_tests.examples.fixedend_urls')),
    url(r'', include('django_restapi_tests.examples.authentication')),
    url(r'', include('django_restapi_tests.examples.submission')),
    url(r'', include('django_restapi_tests.examples.generic_resource')),
    url(r'^admin/(.*)', admin.site.root)
)
| Python |
from django.conf.urls.defaults import *
from django_restapi.model_resource import Collection
from django_restapi.responder import *
from django_restapi.receiver import *
from django_restapi_tests.polls.models import Poll
# Resources that accept fully serialized payloads (XML / JSON) on POST/PUT
# via the matching Receiver classes.
fullxml_poll_resource = Collection(
    queryset = Poll.objects.all(),
    permitted_methods = ('GET', 'POST', 'PUT', 'DELETE'),
    receiver = XMLReceiver(),
    responder = XMLResponder(),
)

fulljson_poll_resource = Collection(
    queryset = Poll.objects.all(),
    permitted_methods = ('GET', 'POST', 'PUT', 'DELETE'),
    receiver = JSONReceiver(),
    responder = JSONResponder()
)

urlpatterns = patterns('',
    # The optional trailing group captures an entry id; the Collection
    # dispatches between list and entry views itself.
    url(r'^fullxml/polls/(.*?)/?$', fullxml_poll_resource),
    url(r'^fulljson/polls/(.*?)/?$', fulljson_poll_resource)
)
| Python |
from django.conf.urls.defaults import *
from django_restapi.model_resource import Collection
from django_restapi.responder import *
from django_restapi_tests.polls.models import Poll, Choice
# Plain XML resources with pagination; note that 'password' is deliberately
# absent from expose_fields so it can never leak into responses.
xml_poll_resource = Collection(
    queryset = Poll.objects.all(),
    permitted_methods = ('GET', 'POST', 'PUT', 'DELETE'),
    expose_fields = ('id', 'question', 'pub_date'),
    responder = XMLResponder(paginate_by = 10)
)

xml_choice_resource = Collection(
    queryset = Choice.objects.all(),
    permitted_methods = ('GET',),
    expose_fields = ('id', 'poll_id', 'choice'),
    responder = XMLResponder(paginate_by = 5)
)

urlpatterns = patterns('',
    url(r'^xml/polls/(.*?)/?$', xml_poll_resource),
    url(r'^xml/choices/(.*?)/?$', xml_choice_resource)
)
| Python |
from django.conf.urls.defaults import *
from django.http import HttpResponseRedirect
from django.shortcuts import render_to_response
from django_restapi.resource import Resource
from django_restapi_tests.people.models import *
# Urls for a resource that does not map 1:1
# to Django models.
class FriendshipCollection(Resource):
    """Read-only HTML listing of all friendships (not a 1:1 model mapping)."""

    def read(self, request):
        return render_to_response('people/friends_list.html',
                                  {'friendships': get_friendship_list()})
class FriendshipEntry(Resource):
    """A single friendship, addressed by the ids of its two members."""

    def read(self, request, person_id, friend_id):
        pair = get_friendship(person_id, friend_id)
        return render_to_response('people/friends_detail.html',
                                  {'friendship': pair})

    def delete(self, request, person_id, friend_id):
        pair = get_friendship(person_id, friend_id)
        person, friend = pair[0], pair[1]
        person.friends.remove(friend)
        return HttpResponseRedirect('/friends/')
urlpatterns = patterns('',
    # Collection is GET-only by default; entries additionally allow DELETE.
    url(r'^friends/$', FriendshipCollection()),
    url(r'^friends/(?P<person_id>\d+)-(?P<friend_id>\d+)/$', FriendshipEntry(permitted_methods=('GET','DELETE'))),
)
from django.conf.urls.defaults import *
from django_restapi.model_resource import Collection, Entry, reverse
from django_restapi.responder import *
from django_restapi_tests.polls.models import Poll, Choice
# JSON Test API URLs
#
# Polls are available at /json/polls/ and
# /json/polls/[poll_id]/.
#
# Different (manual) URL structure for choices:
# /json/polls/[poll_id]/choices/[number of choice]/
# Example: /json/polls/121/choices/2/ identifies the second
# choice for the poll with ID 121.
class ChoiceCollection(Collection):
    """Collection of choices scoped to a single poll via the URL."""

    def read(self, request):
        # Extract the poll id from the request path
        # (/json/polls/<poll_id>/choices/ -> path component 3).
        # NOTE(review): this breaks if the resource is mounted under a
        # different prefix — consider passing poll_id via the URLconf.
        poll_id = int(request.path.split("/")[3])
        filtered_set = self.queryset._clone()
        filtered_set = filtered_set.filter(poll__id=poll_id)
        return self.responder.list(request, filtered_set)

    def get_entry(self, poll_id, choice_num):
        # Resolve the 1-based choice number within the given poll.
        poll = Poll.objects.get(id=int(poll_id))
        choice = poll.get_choice_from_num(int(choice_num))
        return ChoiceEntry(self, choice)

    def get_url(self):
        # NOTE(review): a Collection does not appear to have a self.model
        # attribute (that belongs to Entry); this looks copy-pasted and is
        # presumably dead code or raises AttributeError — confirm.
        return reverse(self, (), {'poll_id':self.model.poll.id})
class ChoiceEntry(Entry):
    def get_url(self):
        """Build the nested URL /json/polls/<poll_id>/choices/<choice_num>/."""
        choice_num = self.model.get_num()
        return reverse(self.collection, (), {'poll_id':self.model.poll.id, 'choice_num':choice_num})
json_poll_resource = Collection(
    queryset = Poll.objects.all(),
    permitted_methods = ('GET', 'POST', 'PUT', 'DELETE'),
    # 'password' is deliberately not exposed.
    expose_fields = ('id', 'question', 'pub_date'),
    responder = JSONResponder(paginate_by=10)
)

json_choice_resource = ChoiceCollection(
    queryset = Choice.objects.all(),
    permitted_methods = ('GET', 'POST', 'PUT', 'DELETE'),
    expose_fields = ('id', 'poll_id', 'choice', 'votes'),
    responder = JSONResponder(paginate_by=5),
    entry_class = ChoiceEntry
)

urlpatterns = patterns('',
    # Entry vs. list dispatch is signalled through the 'is_entry' kwarg.
    url(r'^json/polls/(?P<poll_id>\d+)/choices/(?P<choice_num>\d+)/$', json_choice_resource, {'is_entry':True}),
    url(r'^json/polls/(?P<poll_id>\d+)/choices/$', json_choice_resource, {'is_entry':False}),
    url(r'^json/polls/(.*?)/?$', json_poll_resource)
)
| Python |
from django.conf.urls.defaults import *
from django_restapi.model_resource import Collection
from django_restapi.responder import *
from django_restapi_tests.polls.models import Poll, Choice
# HTML resources rendered through Django templates found under the
# 'polls' template directory.
template_poll_resource = Collection(
    queryset = Poll.objects.all(),
    permitted_methods = ('GET', 'POST', 'PUT', 'DELETE'),
    expose_fields = ('id', 'question', 'pub_date'),
    responder = TemplateResponder(
        template_dir = 'polls',
        template_object_name = 'poll',
        paginate_by = 10
    )
)

template_choice_resource = Collection(
    queryset = Choice.objects.all(),
    permitted_methods = ('GET',),
    expose_fields = ('id', 'poll_id', 'choice', 'votes'),
    responder = TemplateResponder(
        template_dir = 'polls',
        template_object_name = 'choice',
        paginate_by = 5
    )
)

urlpatterns = patterns('',
    # HTML form helpers for creating/editing polls come before the generic
    # catch-all resource patterns.
    url(r'^html/polls/creator/$', template_poll_resource.responder.create_form),
    url(r'^html/polls/(?P<pk>\d+)/editor/$', template_poll_resource.responder.update_form),
    url(r'^html/polls/(.*?)/?$', template_poll_resource),
    url(r'^html/choices/(.*?)/?$', template_choice_resource),
)
| Python |
from django.conf.urls.defaults import *
from django_restapi.model_resource import Collection
from django_restapi.responder import *
from django_restapi_tests.polls.models import Poll, Choice
# Minimal example: default permitted methods, all fields exposed as XML.
simple_poll_resource = Collection(
    queryset = Poll.objects.all(),
    responder = XMLResponder(),
)

simple_choice_resource = Collection(
    queryset = Choice.objects.all(),
    responder = XMLResponder()
)

urlpatterns = patterns('',
    url(r'^api/poll/(.*?)/?$', simple_poll_resource),
    url(r'^api/choice/(.*?)/?$', simple_choice_resource)
)
| Python |
from django.conf.urls.defaults import *
from django_restapi.model_resource import Collection
from django_restapi.responder import *
from django_restapi_tests.polls.models import Poll, Choice
# Example with a fixed URL suffix (/xml/) instead of a format prefix; each
# resource needs separate list and entry patterns.
fixedend_poll_resource = Collection(
    queryset = Poll.objects.all(),
    responder = XMLResponder(),
)

fixedend_choice_resource = Collection(
    queryset = Choice.objects.all(),
    responder = XMLResponder()
)

urlpatterns = patterns('',
    url(r'^polls/xml/$', fixedend_poll_resource),
    url(r'^polls/(.*)/xml/$', fixedend_poll_resource),
    url(r'^choices/xml/$', fixedend_choice_resource),
    url(r'^choices/(.*)/xml/$', fixedend_choice_resource)
)
| Python |
from django.conf.urls.defaults import *
from django_restapi.model_resource import Collection
from django_restapi.responder import *
from django_restapi.authentication import *
from django_restapi_tests.polls.models import Poll
# HTTP Basic
#
# No auth function specified
# -> django.contrib.auth.models.User is used.
# Test with username 'rest', password 'rest'.
basicauth_poll_resource = Collection(
    queryset = Poll.objects.all(),
    responder = XMLResponder(),
    authentication = HttpBasicAuthentication()
)
# HTTP Digest
def digest_authfunc(username, realm):
    """
    Example authfunc for HTTP Digest authentication.

    Looks up and returns the pre-computed credential hash for the given
    pair (raising KeyError for unknown credentials).  In production
    situations the hashes would usually live in an external file or
    database.

    NOTE(review): the table below is keyed (realm, user) while the lookup
    uses (username, realm) — the arguments appear to arrive swapped from
    the caller; confirm against HttpDigestAuthentication before changing.
    """
    known_hashes = {
        ('realm1', 'john') : '3014aff1d0d0f0038e23c1195301def3',  # Password: johnspass
        ('realm2', 'jim') : '5bae77fe607e161b831c8f8026a2ceb2',   # Password: jimspass
    }
    return known_hashes[(username, realm)]
digestauth_poll_resource = Collection(
    queryset = Poll.objects.all(),
    responder = XMLResponder(),
    # Digest credentials are validated via digest_authfunc (realm 'realm1').
    authentication = HttpDigestAuthentication(digest_authfunc, 'realm1')
)

urlpatterns = patterns('',
    url(r'^basic/polls/(.*?)/?$', basicauth_poll_resource),
    url(r'^digest/polls/(.*?)/?$', digestauth_poll_resource)
)
from django.db import models
from django.http import Http404
class Person(models.Model):
    # Display name; also used as the sort key in friendship helpers below.
    name = models.CharField(max_length=20)
    # Symmetrical m2m: if A is B's friend, then B is A's friend.
    friends = models.ManyToManyField('self')
    # Non-symmetrical m2m; the reverse relation is exposed as 'stalkers'.
    idols = models.ManyToManyField('self', symmetrical=False, related_name='stalkers')

    def __unicode__(self):
        return self.name
def get_friendship_list():
    """Return all friendships as de-duplicated [person, friend] pairs.

    Friendships are symmetrical, so each pair is normalised by sorting its
    two members by name before de-duplication; the final list is ordered
    by the first member's name.
    """
    people = Person.objects.filter(friends__isnull=False)
    friendships = []
    for person in people:
        for friend in person.friends.all():
            # sorted(..., key=...) replaces list.sort(cmp=...): cmp was
            # removed in Python 3 and key= works since Python 2.4.
            friendship = sorted([person, friend], key=lambda p: p.name)
            if friendship not in friendships:
                friendships.append(friendship)
    friendships.sort(key=lambda pair: pair[0].name)
    return friendships
def get_friendship(person_id, friend_id):
    """Return the [person, friend] pair for the two ids, sorted by name.

    Raises Http404 when the two people are not friends; the person lookup
    itself raises Person.DoesNotExist if person_id is unknown.
    """
    person = Person.objects.get(id=person_id)
    try:
        friend = person.friends.get(id=friend_id)
    except Person.DoesNotExist:
        raise Http404
    friendship = [person, friend]
    # key= replaces the Python-2-only cmp= argument (removed in Python 3).
    friendship.sort(key=lambda p: p.name)
    return friendship
from django.test import TestCase
from django.utils.functional import curry
class GenericTest(TestCase):
    """Checks HTTP method dispatch of the non-model Friendship resources."""
    fixtures = ['initial_data.json']

    def setUp(self):
        # Emulate PUT/DELETE on the old test client via REQUEST_METHOD.
        self.client.put = curry(self.client.post, REQUEST_METHOD='PUT')
        self.client.delete = curry(self.client.get, REQUEST_METHOD='DELETE')

    def test_resource(self):
        # The collection permits GET only.
        url = '/friends/'
        response = self.client.post(url)
        self.assertEqual(response.status_code, 405)
        response = self.client.put(url)
        self.assertEqual(response.status_code, 405)
        response = self.client.delete(url)
        self.assertEqual(response.status_code, 405)
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
        # An entry permits GET and DELETE only.
        url = '/friends/1-2/'
        response = self.client.post(url)
        self.assertEqual(response.status_code, 405)
        response = self.client.put(url)
        self.assertEqual(response.status_code, 405)
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
        # DELETE redirects back to the collection...
        response = self.client.delete(url)
        self.assertEqual(response.status_code, 302)
        # ...and afterwards the entry is gone.
        response = self.client.get(url)
        self.assertEqual(response.status_code, 404)
from django.db import models
# Create your models here.
| Python |
# Create your views here.
| Python |
#!/usr/bin/env python
# Standard pre-1.4 Django management entry point (duplicate of the other
# test project's manage.py).
from django.core.management import execute_manager
try:
    import settings # Assumed to be in the same directory.
except ImportError:
    import sys
    sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n(If the file settings.py does indeed exist, it's causing an ImportError somehow.)\n" % __file__)
    sys.exit(1)

if __name__ == "__main__":
    execute_manager(settings)
| Python |
#!/usr/bin/python2.4
#
# Copyright 2007 The Python-Twitter Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''The setup and build script for the python-twitter library.'''
__author__ = 'python-twitter@googlegroups.com'
__version__ = '0.8.3'

# The base package metadata to be used by both distutils and setuptools
METADATA = dict(
    name = "python-twitter",
    version = __version__,
    py_modules = ['twitter'],
    author='The Python-Twitter Developers',
    author_email='python-twitter@googlegroups.com',
    description='A python wrapper around the Twitter API',
    license='Apache License 2.0',
    url='http://code.google.com/p/python-twitter/',
    keywords='twitter api',
)

# Extra package metadata to be used only if setuptools is installed
# (distutils does not understand these keys).
SETUPTOOLS_METADATA = dict(
    install_requires = ['setuptools', 'simplejson', 'oauth2'],
    include_package_data = True,
    classifiers = [
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Topic :: Communications :: Chat',
        'Topic :: Internet',
    ],
    test_suite = 'twitter_test.suite',
)
def Read(file):
    """Return the entire contents of *file* as a string.

    Uses try/finally so the handle is always closed (the original leaked
    it); 'with' is avoided for compatibility with the script's Python 2.4
    shebang.
    """
    f = open(file)
    try:
        return f.read()
    finally:
        f.close()
def BuildLongDescription():
    """Concatenate README and CHANGES to form the package long_description."""
    parts = [Read('README'), Read('CHANGES')]
    return '\n'.join(parts)
def Main():
    """Assemble the long description and invoke setup(), preferring
    setuptools over distutils when it is importable."""
    # Build the long_description from the README and CHANGES
    METADATA['long_description'] = BuildLongDescription()
    # Use setuptools if available, otherwise fallback and use distutils
    try:
        import setuptools
        # Mutates the module-level METADATA dict in place.
        METADATA.update(SETUPTOOLS_METADATA)
        setuptools.setup(**METADATA)
    except ImportError:
        import distutils.core
        distutils.core.setup(**METADATA)

if __name__ == '__main__':
    Main()
| Python |
#!/usr/bin/python2.4
#
# Copyright 2007 The Python-Twitter Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Interactive Python 2 script that walks a user through Twitter's OAuth
# 1.0a PIN-based (out-of-band) flow and prints the resulting access token.
import os
import sys

# parse_qsl moved to urlparse module in v2.6
try:
    from urlparse import parse_qsl
except:
    # NOTE(review): bare except kept verbatim; catching ImportError
    # explicitly would be safer.
    from cgi import parse_qsl

import oauth2 as oauth

# Twitter OAuth 1.0a endpoints.
REQUEST_TOKEN_URL = 'https://api.twitter.com/oauth/request_token'
ACCESS_TOKEN_URL = 'https://api.twitter.com/oauth/access_token'
AUTHORIZATION_URL = 'https://api.twitter.com/oauth/authorize'
SIGNIN_URL = 'https://api.twitter.com/oauth/authenticate'

# Fill these in with your application's registered credentials.
consumer_key = None
consumer_secret = None

if consumer_key is None or consumer_secret is None:
    print 'You need to edit this script and provide values for the'
    print 'consumer_key and also consumer_secret.'
    print ''
    print 'The values you need come from Twitter - you need to register'
    print 'as a developer your "application". This is needed only until'
    print 'Twitter finishes the idea they have of a way to allow open-source'
    print 'based libraries to have a token that can be used to generate a'
    print 'one-time use key that will allow the library to make the request'
    print 'on your behalf.'
    print ''
    sys.exit(1)

signature_method_hmac_sha1 = oauth.SignatureMethod_HMAC_SHA1()
oauth_consumer = oauth.Consumer(key=consumer_key, secret=consumer_secret)
oauth_client = oauth.Client(oauth_consumer)

print 'Requesting temp token from Twitter'

# Step 1: obtain a temporary request token.
resp, content = oauth_client.request(REQUEST_TOKEN_URL, 'GET')

if resp['status'] != '200':
    print 'Invalid respond from Twitter requesting temp token: %s' % resp['status']
else:
    request_token = dict(parse_qsl(content))

    print ''
    print 'Please visit this Twitter page and retrieve the pincode to be used'
    print 'in the next step to obtaining an Authentication Token:'
    print ''
    print '%s?oauth_token=%s' % (AUTHORIZATION_URL, request_token['oauth_token'])
    print ''

    # Step 2: the user authorizes the app and comes back with a PIN.
    pincode = raw_input('Pincode? ')

    token = oauth.Token(request_token['oauth_token'], request_token['oauth_token_secret'])
    token.set_verifier(pincode)

    print ''
    print 'Generating and signing request for an access token'
    print ''

    # Step 3: exchange the verified request token for an access token.
    oauth_client = oauth.Client(oauth_consumer, token)
    resp, content = oauth_client.request(ACCESS_TOKEN_URL, method='POST', body='oauth_verifier=%s' % pincode)
    access_token = dict(parse_qsl(content))

    if resp['status'] != '200':
        print 'The request for a Token did not succeed: %s' % resp['status']
        print access_token
    else:
        print 'Your Twitter Access Token key: %s' % access_token['oauth_token']
        print ' Access Token secret: %s' % access_token['oauth_token_secret']
        print ''
| Python |
"""Implementation of JSONEncoder
"""
import re
try:
from simplejson._speedups import encode_basestring_ascii as c_encode_basestring_ascii
except ImportError:
c_encode_basestring_ascii = None
try:
from simplejson._speedups import make_encoder as c_make_encoder
except ImportError:
c_make_encoder = None
# Characters that must be escaped inside a JSON string literal.
ESCAPE = re.compile(r'[\x00-\x1f\\"\b\f\n\r\t]')
# For the ASCII-only encoder: backslash/quote plus anything outside the
# printable ASCII range.
ESCAPE_ASCII = re.compile(r'([\\"]|[^\ -~])')
# Detects bytes with the high bit set (possible UTF-8 multi-byte data).
HAS_UTF8 = re.compile(r'[\x80-\xff]')
# Short two-character escapes; remaining control chars get \uXXXX below.
ESCAPE_DCT = {
    '\\': '\\\\',
    '"': '\\"',
    '\b': '\\b',
    '\f': '\\f',
    '\n': '\\n',
    '\r': '\\r',
    '\t': '\\t',
}
for i in range(0x20):
    ESCAPE_DCT.setdefault(chr(i), '\\u%04x' % (i,))

# Assume this produces an infinity on all machines (probably not guaranteed)
INFINITY = float('1e66666')
FLOAT_REPR = repr
def encode_basestring(s):
    """Return a JSON string literal for *s*.

    Only the characters matched by ESCAPE are rewritten (via ESCAPE_DCT);
    non-ASCII characters pass through unescaped.
    """
    escaped = ESCAPE.sub(lambda match: ESCAPE_DCT[match.group(0)], s)
    return '"%s"' % escaped
def py_encode_basestring_ascii(s):
    """Return an ASCII-only JSON representation of a Python string
    """
    # Python 2 idiom: promote a UTF-8 encoded byte string to unicode so
    # that ord() below yields full code points.
    if isinstance(s, str) and HAS_UTF8.search(s) is not None:
        s = s.decode('utf-8')
    def replace(match):
        s = match.group(0)
        try:
            # Short escape (\n, \", ...) when one exists.
            return ESCAPE_DCT[s]
        except KeyError:
            n = ord(s)
            if n < 0x10000:
                return '\\u%04x' % (n,)
            else:
                # surrogate pair
                n -= 0x10000
                s1 = 0xd800 | ((n >> 10) & 0x3ff)
                s2 = 0xdc00 | (n & 0x3ff)
                return '\\u%04x\\u%04x' % (s1, s2)
    return '"' + str(ESCAPE_ASCII.sub(replace, s)) + '"'

# Prefer the C implementation when the _speedups extension is available.
encode_basestring_ascii = c_encode_basestring_ascii or py_encode_basestring_ascii
class JSONEncoder(object):
    """Extensible JSON <http://json.org> encoder for Python data structures.

    Supports the following objects and types by default:

    +-------------------+---------------+
    | Python            | JSON          |
    +===================+===============+
    | dict              | object        |
    +-------------------+---------------+
    | list, tuple       | array         |
    +-------------------+---------------+
    | str, unicode      | string        |
    +-------------------+---------------+
    | int, long, float  | number        |
    +-------------------+---------------+
    | True              | true          |
    +-------------------+---------------+
    | False             | false         |
    +-------------------+---------------+
    | None              | null          |
    +-------------------+---------------+

    To extend this to recognize other objects, subclass and implement a
    ``.default()`` method with another method that returns a serializable
    object for ``o`` if possible, otherwise it should call the superclass
    implementation (to raise ``TypeError``).
    """
    # Class-level defaults; overridden per-instance via ``separators``.
    item_separator = ', '
    key_separator = ': '

    def __init__(self, skipkeys=False, ensure_ascii=True,
            check_circular=True, allow_nan=True, sort_keys=False,
            indent=None, separators=None, encoding='utf-8', default=None):
        """Constructor for JSONEncoder, with sensible defaults.

        If skipkeys is False, then it is a TypeError to attempt
        encoding of keys that are not str, int, long, float or None.  If
        skipkeys is True, such items are simply skipped.

        If ensure_ascii is True, the output is guaranteed to be str
        objects with all incoming unicode characters escaped.  If
        ensure_ascii is false, the output will be unicode object.

        If check_circular is True, then lists, dicts, and custom encoded
        objects will be checked for circular references during encoding to
        prevent an infinite recursion (which would cause an OverflowError).
        Otherwise, no such check takes place.

        If allow_nan is True, then NaN, Infinity, and -Infinity will be
        encoded as such.  This behavior is not JSON specification compliant,
        but is consistent with most JavaScript based encoders and decoders.
        Otherwise, it will be a ValueError to encode such floats.

        If sort_keys is True, then the output of dictionaries will be
        sorted by key; this is useful for regression tests to ensure
        that JSON serializations can be compared on a day-to-day basis.

        If indent is a non-negative integer, then JSON array
        elements and object members will be pretty-printed with that
        indent level.  An indent level of 0 will only insert newlines.
        None is the most compact representation.

        If specified, separators should be a (item_separator, key_separator)
        tuple.  The default is (', ', ': ').  To get the most compact JSON
        representation you should specify (',', ':') to eliminate whitespace.

        If specified, default is a function that gets called for objects
        that can't otherwise be serialized.  It should return a JSON encodable
        version of the object or raise a ``TypeError``.

        If encoding is not None, then all input strings will be
        transformed into unicode using that encoding prior to JSON-encoding.
        The default is UTF-8.
        """
        self.skipkeys = skipkeys
        self.ensure_ascii = ensure_ascii
        self.check_circular = check_circular
        self.allow_nan = allow_nan
        self.sort_keys = sort_keys
        self.indent = indent
        if separators is not None:
            self.item_separator, self.key_separator = separators
        if default is not None:
            # Shadow the default() method with the supplied callable.
            self.default = default
        self.encoding = encoding

    def default(self, o):
        """Implement this method in a subclass such that it returns
        a serializable object for ``o``, or calls the base implementation
        (to raise a ``TypeError``).

        For example, to support arbitrary iterators, you could
        implement default like this::

            def default(self, o):
                try:
                    iterable = iter(o)
                except TypeError:
                    pass
                else:
                    return list(iterable)
                return JSONEncoder.default(self, o)
        """
        raise TypeError("%r is not JSON serializable" % (o,))

    def encode(self, o):
        """Return a JSON string representation of a Python data structure.

        >>> JSONEncoder().encode({"foo": ["bar", "baz"]})
        '{"foo": ["bar", "baz"]}'
        """
        # This is for extremely simple cases and benchmarks.
        if isinstance(o, basestring):
            if isinstance(o, str):
                _encoding = self.encoding
                if (_encoding is not None
                        and not (_encoding == 'utf-8')):
                    o = o.decode(_encoding)
            if self.ensure_ascii:
                return encode_basestring_ascii(o)
            else:
                return encode_basestring(o)
        # This doesn't pass the iterator directly to ''.join() because the
        # exceptions aren't as detailed.  The list call should be roughly
        # equivalent to the PySequence_Fast that ''.join() would do.
        chunks = self.iterencode(o, _one_shot=True)
        if not isinstance(chunks, (list, tuple)):
            chunks = list(chunks)
        return ''.join(chunks)

    def iterencode(self, o, _one_shot=False):
        """Encode the given object and yield each string
        representation as available.

        For example::

            for chunk in JSONEncoder().iterencode(bigobject):
                mysocket.write(chunk)
        """
        # markers is the circular-reference tracking dict (None disables it).
        if self.check_circular:
            markers = {}
        else:
            markers = None
        if self.ensure_ascii:
            _encoder = encode_basestring_ascii
        else:
            _encoder = encode_basestring
        if self.encoding != 'utf-8':
            # Wrap the string encoder so Python 2 byte strings are first
            # decoded from the configured encoding.
            def _encoder(o, _orig_encoder=_encoder, _encoding=self.encoding):
                if isinstance(o, str):
                    o = o.decode(_encoding)
                return _orig_encoder(o)

        def floatstr(o, allow_nan=self.allow_nan, _repr=FLOAT_REPR, _inf=INFINITY, _neginf=-INFINITY):
            # Check for specials. Note that this type of test is processor- and/or
            # platform-specific, so do tests which don't depend on the internals.
            if o != o:
                text = 'NaN'
            elif o == _inf:
                text = 'Infinity'
            elif o == _neginf:
                text = '-Infinity'
            else:
                return _repr(o)
            if not allow_nan:
                raise ValueError("Out of range float values are not JSON compliant: %r"
                    % (o,))
            return text

        # The C encoder only supports the one-shot, unindented, unsorted
        # case; otherwise fall back to the pure-Python closure family.
        if _one_shot and c_make_encoder is not None and not self.indent and not self.sort_keys:
            _iterencode = c_make_encoder(
                markers, self.default, _encoder, self.indent,
                self.key_separator, self.item_separator, self.sort_keys,
                self.skipkeys, self.allow_nan)
        else:
            _iterencode = _make_iterencode(
                markers, self.default, _encoder, self.indent, floatstr,
                self.key_separator, self.item_separator, self.sort_keys,
                self.skipkeys, _one_shot)
        return _iterencode(o, 0)
def _make_iterencode(markers, _default, _encoder, _indent, _floatstr, _key_separator, _item_separator, _sort_keys, _skipkeys, _one_shot,
        ## HACK: hand-optimized bytecode; turn globals into locals
        False=False,
        True=True,
        ValueError=ValueError,
        basestring=basestring,
        dict=dict,
        float=float,
        id=id,
        int=int,
        isinstance=isinstance,
        list=list,
        long=long,
        str=str,
        tuple=tuple,
    ):
    # Build the pure-Python encoder closures.  Returns _iterencode, a
    # generator function yielding JSON text chunks for any object; the
    # Python 2 default-argument hack above turns global lookups into
    # fast local lookups inside the hot loops.

    def _iterencode_list(lst, _current_indent_level):
        # Yield the JSON array encoding of lst, chunk by chunk.
        if not lst:
            yield '[]'
            return
        if markers is not None:
            # Circular-reference guard keyed on object identity.
            markerid = id(lst)
            if markerid in markers:
                raise ValueError("Circular reference detected")
            markers[markerid] = lst
        buf = '['
        if _indent is not None:
            _current_indent_level += 1
            newline_indent = '\n' + (' ' * (_indent * _current_indent_level))
            separator = _item_separator + newline_indent
            buf += newline_indent
        else:
            newline_indent = None
            separator = _item_separator
        first = True
        for value in lst:
            if first:
                first = False
            else:
                # After the first item, buf carries only the separator.
                buf = separator
            if isinstance(value, basestring):
                yield buf + _encoder(value)
            elif value is None:
                yield buf + 'null'
            elif value is True:
                yield buf + 'true'
            elif value is False:
                yield buf + 'false'
            elif isinstance(value, (int, long)):
                yield buf + str(value)
            elif isinstance(value, float):
                yield buf + _floatstr(value)
            else:
                # Container or custom object: yield the separator, then
                # delegate to the appropriate sub-generator.
                yield buf
                if isinstance(value, (list, tuple)):
                    chunks = _iterencode_list(value, _current_indent_level)
                elif isinstance(value, dict):
                    chunks = _iterencode_dict(value, _current_indent_level)
                else:
                    chunks = _iterencode(value, _current_indent_level)
                for chunk in chunks:
                    yield chunk
        if newline_indent is not None:
            _current_indent_level -= 1
            yield '\n' + (' ' * (_indent * _current_indent_level))
        yield ']'
        if markers is not None:
            del markers[markerid]

    def _iterencode_dict(dct, _current_indent_level):
        # Yield the JSON object encoding of dct, chunk by chunk.
        if not dct:
            yield '{}'
            return
        if markers is not None:
            markerid = id(dct)
            if markerid in markers:
                raise ValueError("Circular reference detected")
            markers[markerid] = dct
        yield '{'
        if _indent is not None:
            _current_indent_level += 1
            newline_indent = '\n' + (' ' * (_indent * _current_indent_level))
            item_separator = _item_separator + newline_indent
            yield newline_indent
        else:
            newline_indent = None
            item_separator = _item_separator
        first = True
        if _sort_keys:
            items = dct.items()
            items.sort(key=lambda kv: kv[0])
        else:
            items = dct.iteritems()
        for key, value in items:
            if isinstance(key, basestring):
                pass
            # JavaScript is weakly typed for these, so it makes sense to
            # also allow them.  Many encoders seem to do something like this.
            elif isinstance(key, float):
                key = _floatstr(key)
            elif isinstance(key, (int, long)):
                key = str(key)
            elif key is True:
                key = 'true'
            elif key is False:
                key = 'false'
            elif key is None:
                key = 'null'
            elif _skipkeys:
                continue
            else:
                raise TypeError("key %r is not a string" % (key,))
            if first:
                first = False
            else:
                yield item_separator
            yield _encoder(key)
            yield _key_separator
            if isinstance(value, basestring):
                yield _encoder(value)
            elif value is None:
                yield 'null'
            elif value is True:
                yield 'true'
            elif value is False:
                yield 'false'
            elif isinstance(value, (int, long)):
                yield str(value)
            elif isinstance(value, float):
                yield _floatstr(value)
            else:
                if isinstance(value, (list, tuple)):
                    chunks = _iterencode_list(value, _current_indent_level)
                elif isinstance(value, dict):
                    chunks = _iterencode_dict(value, _current_indent_level)
                else:
                    chunks = _iterencode(value, _current_indent_level)
                for chunk in chunks:
                    yield chunk
        if newline_indent is not None:
            _current_indent_level -= 1
            yield '\n' + (' ' * (_indent * _current_indent_level))
        yield '}'
        if markers is not None:
            del markers[markerid]

    def _iterencode(o, _current_indent_level):
        # Top-level dispatch on the type of o.
        if isinstance(o, basestring):
            yield _encoder(o)
        elif o is None:
            yield 'null'
        elif o is True:
            yield 'true'
        elif o is False:
            yield 'false'
        elif isinstance(o, (int, long)):
            yield str(o)
        elif isinstance(o, float):
            yield _floatstr(o)
        elif isinstance(o, (list, tuple)):
            for chunk in _iterencode_list(o, _current_indent_level):
                yield chunk
        elif isinstance(o, dict):
            for chunk in _iterencode_dict(o, _current_indent_level):
                yield chunk
        else:
            if markers is not None:
                markerid = id(o)
                if markerid in markers:
                    raise ValueError("Circular reference detected")
                markers[markerid] = o
            # Ask _default for a serializable substitute, then re-encode it.
            o = _default(o)
            for chunk in _iterencode(o, _current_indent_level):
                yield chunk
            if markers is not None:
                del markers[markerid]
    return _iterencode
| Python |
"""Implementation of JSONDecoder
"""
import re
import sys
import struct
from simplejson.scanner import make_scanner
try:
from simplejson._speedups import scanstring as c_scanstring
except ImportError:
c_scanstring = None
__all__ = ['JSONDecoder']
# Regex flags shared by the decoder patterns; DOTALL/MULTILINE let the
# patterns track positions across a multi-line document.
FLAGS = re.VERBOSE | re.MULTILINE | re.DOTALL
def _floatconstants():
_BYTES = '7FF80000000000007FF0000000000000'.decode('hex')
if sys.byteorder != 'big':
_BYTES = _BYTES[:8][::-1] + _BYTES[8:][::-1]
nan, inf = struct.unpack('dd', _BYTES)
return nan, inf, -inf
NaN, PosInf, NegInf = _floatconstants()
def linecol(doc, pos):
    """Map character offset *pos* within *doc* to a ``(line, column)`` pair.

    Lines are numbered from 1.  On the first line the column equals the
    raw offset; on later lines it is the distance from the preceding
    newline.
    """
    lineno = doc.count('\n', 0, pos) + 1
    if lineno == 1:
        return lineno, pos
    return lineno, pos - doc.rindex('\n', 0, pos)
def errmsg(msg, doc, pos, end=None):
    """Format a parse-error message locating *pos* (and optionally *end*)
    within *doc* as 1-based line / column coordinates."""
    # Note that this function is called from _speedups
    def _locate(p):
        line = doc.count('\n', 0, p) + 1
        if line == 1:
            return line, p
        return line, p - doc.rindex('\n', 0, p)
    lineno, colno = _locate(pos)
    if end is None:
        return '%s: line %d column %d (char %d)' % (msg, lineno, colno, pos)
    endlineno, endcolno = _locate(end)
    template = '%s: line %d column %d - line %d column %d (char %d - %d)'
    return template % (msg, lineno, colno, endlineno, endcolno, pos, end)
# Decoded values for the three non-standard literal tokens.
_CONSTANTS = {
    '-Infinity': NegInf,
    'Infinity': PosInf,
    'NaN': NaN,
}
# Matches a run of plain characters followed by one character that needs
# special handling: the closing quote, a backslash, or a raw control char.
STRINGCHUNK = re.compile(r'(.*?)(["\\\x00-\x1f])', FLAGS)
# Lookup table for the single-character backslash escapes.
BACKSLASH = {
    '"': u'"', '\\': u'\\', '/': u'/',
    'b': u'\b', 'f': u'\f', 'n': u'\n', 'r': u'\r', 't': u'\t',
}
# Encoding assumed for str input when the caller does not specify one.
DEFAULT_ENCODING = "utf-8"
def py_scanstring(s, end, encoding=None, strict=True, _b=BACKSLASH, _m=STRINGCHUNK.match):
    """Scan the string s for a JSON string. End is the index of the
    character in s after the quote that started the JSON string.
    Unescapes all valid JSON string escape sequences and raises ValueError
    on attempt to decode an invalid string. If strict is False then literal
    control characters are allowed in the string.

    Returns a tuple of the decoded string and the index of the character in s
    after the end quote."""
    if encoding is None:
        encoding = DEFAULT_ENCODING
    chunks = []
    _append = chunks.append
    begin = end - 1
    while 1:
        chunk = _m(s, end)
        if chunk is None:
            raise ValueError(
                errmsg("Unterminated string starting at", s, begin))
        end = chunk.end()
        content, terminator = chunk.groups()
        # Content contains zero or more unescaped string characters
        if content:
            if not isinstance(content, unicode):
                content = unicode(content, encoding)
            _append(content)
        # Terminator is the end of string, a literal control character,
        # or a backslash denoting that an escape sequence follows
        if terminator == '"':
            break
        elif terminator != '\\':
            if strict:
                msg = "Invalid control character %r at" % (terminator,)
                # Bug fix: format through errmsg() like every other error in
                # this module so the message carries line/column context,
                # instead of raising ValueError with a raw argument tuple.
                raise ValueError(errmsg(msg, s, end))
            else:
                _append(terminator)
                continue
        try:
            esc = s[end]
        except IndexError:
            raise ValueError(
                errmsg("Unterminated string starting at", s, begin))
        # If not a unicode escape sequence, must be in the lookup table
        if esc != 'u':
            try:
                char = _b[esc]
            except KeyError:
                raise ValueError(
                    errmsg("Invalid \\escape: %r" % (esc,), s, end))
            end += 1
        else:
            # Unicode escape sequence
            esc = s[end + 1:end + 5]
            next_end = end + 5
            if len(esc) != 4:
                msg = "Invalid \\uXXXX escape"
                raise ValueError(errmsg(msg, s, end))
            uni = int(esc, 16)
            # Check for surrogate pair on UCS-4 systems
            if 0xd800 <= uni <= 0xdbff and sys.maxunicode > 65535:
                msg = "Invalid \\uXXXX\\uXXXX surrogate pair"
                if not s[end + 5:end + 7] == '\\u':
                    raise ValueError(errmsg(msg, s, end))
                esc2 = s[end + 7:end + 11]
                if len(esc2) != 4:
                    raise ValueError(errmsg(msg, s, end))
                uni2 = int(esc2, 16)
                uni = 0x10000 + (((uni - 0xd800) << 10) | (uni2 - 0xdc00))
                next_end += 6
            char = unichr(uni)
            end = next_end
        # Append the unescaped character
        _append(char)
    return u''.join(chunks), end
# Use speedup if available
scanstring = c_scanstring or py_scanstring

# JSON-insignificant whitespace between tokens.
WHITESPACE = re.compile(r'[ \t\n\r]*', FLAGS)
WHITESPACE_STR = ' \t\n\r'
def JSONObject((s, end), encoding, strict, scan_once, object_hook, _w=WHITESPACE.match, _ws=WHITESPACE_STR):
    # Parse a JSON object whose '{' is at index end-1; return (dict, end).
    # (Python 2 tuple-parameter syntax; _w/_ws bind module lookups locally.)
    pairs = {}
    # Use a slice to prevent IndexError from being raised, the following
    # check will raise a more specific ValueError if the string is empty
    nextchar = s[end:end + 1]
    # Normally we expect nextchar == '"'
    if nextchar != '"':
        if nextchar in _ws:
            end = _w(s, end).end()
            nextchar = s[end:end + 1]
        # Trivial empty object
        if nextchar == '}':
            return pairs, end + 1
        elif nextchar != '"':
            raise ValueError(errmsg("Expecting property name", s, end))
    end += 1
    while True:
        key, end = scanstring(s, end, encoding, strict)
        # To skip some function call overhead we optimize the fast paths where
        # the JSON key separator is ": " or just ":".
        if s[end:end + 1] != ':':
            end = _w(s, end).end()
            if s[end:end + 1] != ':':
                raise ValueError(errmsg("Expecting : delimiter", s, end))
        end += 1
        # Skip whitespace after the ':' (fast path: at most two chars).
        try:
            if s[end] in _ws:
                end += 1
                if s[end] in _ws:
                    end = _w(s, end + 1).end()
        except IndexError:
            pass
        try:
            value, end = scan_once(s, end)
        except StopIteration:
            raise ValueError(errmsg("Expecting object", s, end))
        pairs[key] = value
        # Consume whitespace, then expect ',' or the closing '}'.
        try:
            nextchar = s[end]
            if nextchar in _ws:
                end = _w(s, end + 1).end()
                nextchar = s[end]
        except IndexError:
            nextchar = ''
        end += 1
        if nextchar == '}':
            break
        elif nextchar != ',':
            raise ValueError(errmsg("Expecting , delimiter", s, end - 1))
        # After the comma, skip whitespace up to the next property name.
        try:
            nextchar = s[end]
            if nextchar in _ws:
                end += 1
                nextchar = s[end]
                if nextchar in _ws:
                    end = _w(s, end + 1).end()
                    nextchar = s[end]
        except IndexError:
            nextchar = ''
        end += 1
        if nextchar != '"':
            raise ValueError(errmsg("Expecting property name", s, end - 1))
    if object_hook is not None:
        # Let the hook replace the freshly-built dict (e.g. class hinting).
        pairs = object_hook(pairs)
    return pairs, end
def JSONArray((s, end), scan_once, _w=WHITESPACE.match, _ws=WHITESPACE_STR):
    # Parse a JSON array whose '[' is at index end-1; return (list, end).
    # (Python 2 tuple-parameter syntax; _w/_ws bind module lookups locally.)
    values = []
    nextchar = s[end:end + 1]
    if nextchar in _ws:
        end = _w(s, end + 1).end()
        nextchar = s[end:end + 1]
    # Look-ahead for trivial empty array
    if nextchar == ']':
        return values, end + 1
    _append = values.append
    while True:
        try:
            value, end = scan_once(s, end)
        except StopIteration:
            raise ValueError(errmsg("Expecting object", s, end))
        _append(value)
        # Consume whitespace, then expect ',' or the closing ']'.
        nextchar = s[end:end + 1]
        if nextchar in _ws:
            end = _w(s, end + 1).end()
            nextchar = s[end:end + 1]
        end += 1
        if nextchar == ']':
            break
        elif nextchar != ',':
            raise ValueError(errmsg("Expecting , delimiter", s, end))
        # Skip whitespace after the comma (fast path: at most two chars).
        try:
            if s[end] in _ws:
                end += 1
                if s[end] in _ws:
                    end = _w(s, end + 1).end()
        except IndexError:
            pass
    return values, end
class JSONDecoder(object):
    """Simple JSON <http://json.org> decoder

    Performs the following translations in decoding by default:

    +---------------+-------------------+
    | JSON          | Python            |
    +===============+===================+
    | object        | dict              |
    +---------------+-------------------+
    | array         | list              |
    +---------------+-------------------+
    | string        | unicode           |
    +---------------+-------------------+
    | number (int)  | int, long         |
    +---------------+-------------------+
    | number (real) | float             |
    +---------------+-------------------+
    | true          | True              |
    +---------------+-------------------+
    | false         | False             |
    +---------------+-------------------+
    | null          | None              |
    +---------------+-------------------+

    It also understands ``NaN``, ``Infinity``, and ``-Infinity`` as
    their corresponding ``float`` values, which is outside the JSON spec.
    """

    def __init__(self, encoding=None, object_hook=None, parse_float=None,
            parse_int=None, parse_constant=None, strict=True):
        """``encoding`` determines the encoding used to interpret any ``str``
        objects decoded by this instance (utf-8 by default).  It has no
        effect when decoding ``unicode`` objects.

        Note that currently only encodings that are a superset of ASCII work,
        strings of other encodings should be passed in as ``unicode``.

        ``object_hook``, if specified, will be called with the result
        of every JSON object decoded and its return value will be used in
        place of the given ``dict``.  This can be used to provide custom
        deserializations (e.g. to support JSON-RPC class hinting).

        ``parse_float``, if specified, will be called with the string
        of every JSON float to be decoded.  By default this is equivalent to
        float(num_str).  This can be used to use another datatype or parser
        for JSON floats (e.g. decimal.Decimal).

        ``parse_int``, if specified, will be called with the string
        of every JSON int to be decoded.  By default this is equivalent to
        int(num_str).  This can be used to use another datatype or parser
        for JSON integers (e.g. float).

        ``parse_constant``, if specified, will be called with one of the
        following strings: -Infinity, Infinity, NaN.
        This can be used to raise an exception if invalid JSON numbers
        are encountered.
        """
        self.encoding = encoding
        self.object_hook = object_hook
        # Fall back to the builtin conversions when no override is given.
        self.parse_float = parse_float or float
        self.parse_int = parse_int or int
        self.parse_constant = parse_constant or _CONSTANTS.__getitem__
        self.strict = strict
        self.parse_object = JSONObject
        self.parse_array = JSONArray
        self.parse_string = scanstring
        # scan_once dispatches on the first character of each value.
        self.scan_once = make_scanner(self)

    def decode(self, s, _w=WHITESPACE.match):
        """Return the Python representation of ``s`` (a ``str`` or ``unicode``
        instance containing a JSON document)
        """
        obj, end = self.raw_decode(s, idx=_w(s, 0).end())
        end = _w(s, end).end()
        # Reject documents with trailing non-whitespace garbage.
        if end != len(s):
            raise ValueError(errmsg("Extra data", s, end, len(s)))
        return obj

    def raw_decode(self, s, idx=0):
        """Decode a JSON document from ``s`` (a ``str`` or ``unicode`` beginning
        with a JSON document) and return a 2-tuple of the Python
        representation and the index in ``s`` where the document ended.

        This can be used to decode a JSON document from a string that may
        have extraneous data at the end.
        """
        try:
            obj, end = self.scan_once(s, idx)
        except StopIteration:
            raise ValueError("No JSON object could be decoded")
        return obj, end
| Python |
r"""JSON (JavaScript Object Notation) <http://json.org> is a subset of
JavaScript syntax (ECMA-262 3rd edition) used as a lightweight data
interchange format.
:mod:`simplejson` exposes an API familiar to users of the standard library
:mod:`marshal` and :mod:`pickle` modules. It is the externally maintained
version of the :mod:`json` library contained in Python 2.6, but maintains
compatibility with Python 2.4 and Python 2.5 and (currently) has
significant performance advantages, even without using the optional C
extension for speedups.
Encoding basic Python object hierarchies::
>>> import simplejson as json
>>> json.dumps(['foo', {'bar': ('baz', None, 1.0, 2)}])
'["foo", {"bar": ["baz", null, 1.0, 2]}]'
>>> print json.dumps("\"foo\bar")
"\"foo\bar"
>>> print json.dumps(u'\u1234')
"\u1234"
>>> print json.dumps('\\')
"\\"
>>> print json.dumps({"c": 0, "b": 0, "a": 0}, sort_keys=True)
{"a": 0, "b": 0, "c": 0}
>>> from StringIO import StringIO
>>> io = StringIO()
>>> json.dump(['streaming API'], io)
>>> io.getvalue()
'["streaming API"]'
Compact encoding::
>>> import simplejson as json
>>> json.dumps([1,2,3,{'4': 5, '6': 7}], separators=(',',':'))
'[1,2,3,{"4":5,"6":7}]'
Pretty printing::
>>> import simplejson as json
>>> s = json.dumps({'4': 5, '6': 7}, sort_keys=True, indent=4)
>>> print '\n'.join([l.rstrip() for l in s.splitlines()])
{
"4": 5,
"6": 7
}
Decoding JSON::
>>> import simplejson as json
>>> obj = [u'foo', {u'bar': [u'baz', None, 1.0, 2]}]
>>> json.loads('["foo", {"bar":["baz", null, 1.0, 2]}]') == obj
True
>>> json.loads('"\\"foo\\bar"') == u'"foo\x08ar'
True
>>> from StringIO import StringIO
>>> io = StringIO('["streaming API"]')
>>> json.load(io)[0] == 'streaming API'
True
Specializing JSON object decoding::
>>> import simplejson as json
>>> def as_complex(dct):
... if '__complex__' in dct:
... return complex(dct['real'], dct['imag'])
... return dct
...
>>> json.loads('{"__complex__": true, "real": 1, "imag": 2}',
... object_hook=as_complex)
(1+2j)
>>> import decimal
>>> json.loads('1.1', parse_float=decimal.Decimal) == decimal.Decimal('1.1')
True
Specializing JSON object encoding::
>>> import simplejson as json
>>> def encode_complex(obj):
... if isinstance(obj, complex):
... return [obj.real, obj.imag]
... raise TypeError("%r is not JSON serializable" % (o,))
...
>>> json.dumps(2 + 1j, default=encode_complex)
'[2.0, 1.0]'
>>> json.JSONEncoder(default=encode_complex).encode(2 + 1j)
'[2.0, 1.0]'
>>> ''.join(json.JSONEncoder(default=encode_complex).iterencode(2 + 1j))
'[2.0, 1.0]'
Using simplejson.tool from the shell to validate and pretty-print::
$ echo '{"json":"obj"}' | python -msimplejson.tool
{
"json": "obj"
}
$ echo '{ 1.2:3.4}' | python -msimplejson.tool
Expecting property name: line 1 column 2 (char 2)
"""
# Version of this vendored simplejson distribution.
__version__ = '2.0.7'
# Public API re-exported from the submodules.
__all__ = [
    'dump', 'dumps', 'load', 'loads',
    'JSONDecoder', 'JSONEncoder',
]
# Python 2 implicit-relative imports of the sibling modules.
from decoder import JSONDecoder
from encoder import JSONEncoder
# Shared encoder instance used by dump()/dumps() when every option is at
# its default value; avoids constructing a JSONEncoder per call.
_default_encoder = JSONEncoder(
    skipkeys=False,
    ensure_ascii=True,
    check_circular=True,
    allow_nan=True,
    indent=None,
    separators=None,
    encoding='utf-8',
    default=None,
)
def dump(obj, fp, skipkeys=False, ensure_ascii=True, check_circular=True,
        allow_nan=True, cls=None, indent=None, separators=None,
        encoding='utf-8', default=None, **kw):
    """Serialize ``obj`` as a JSON formatted stream to ``fp`` (a
    ``.write()``-supporting file-like object).

    ``skipkeys``: skip (instead of raising ``TypeError`` on) dict keys
    that are not of a basic type (``str``, ``unicode``, ``int``, ``long``,
    ``float``, ``bool``, ``None``).

    ``ensure_ascii``: when ``False``, chunks written to ``fp`` may be
    ``unicode`` instances; ``fp.write()`` must cope with that (e.g. via
    ``codecs.getwriter()``).

    ``check_circular``: when ``False``, skip circular-reference checking
    for containers (a cycle then causes ``OverflowError`` or worse).

    ``allow_nan``: when ``False``, serializing ``nan``/``inf``/``-inf``
    raises ``ValueError`` instead of writing the JavaScript equivalents
    (``NaN``, ``Infinity``, ``-Infinity``).

    ``indent``: non-negative integer for pretty-printing (0 inserts only
    newlines); ``None`` is the most compact representation.

    ``separators``: an ``(item_separator, dict_separator)`` tuple
    overriding the default ``(', ', ': ')``; ``(',', ':')`` is the most
    compact.

    ``encoding``: character encoding for ``str`` instances (UTF-8 by
    default).

    ``default(obj)``: called for otherwise unserializable objects; must
    return a serializable version or raise ``TypeError``.

    ``cls``: a custom ``JSONEncoder`` subclass (e.g. one overriding
    ``.default()``).
    """
    every_option_default = (skipkeys is False and ensure_ascii is True and
        check_circular is True and allow_nan is True and
        cls is None and indent is None and separators is None and
        encoding == 'utf-8' and default is None and not kw)
    if every_option_default:
        # Common case: reuse the shared module-level encoder.
        chunks = _default_encoder.iterencode(obj)
    else:
        encoder_cls = cls or JSONEncoder
        chunks = encoder_cls(skipkeys=skipkeys, ensure_ascii=ensure_ascii,
            check_circular=check_circular, allow_nan=allow_nan,
            indent=indent, separators=separators, encoding=encoding,
            default=default, **kw).iterencode(obj)
    # Stream chunk-by-chunk; writelines could be faster on some Python
    # versions, at a debuggability cost.
    for chunk in chunks:
        fp.write(chunk)
def dumps(obj, skipkeys=False, ensure_ascii=True, check_circular=True,
        allow_nan=True, cls=None, indent=None, separators=None,
        encoding='utf-8', default=None, **kw):
    """Serialize ``obj`` to a JSON formatted ``str``.

    ``skipkeys``: skip (instead of raising ``TypeError`` on) dict keys
    that are not of a basic type (``str``, ``unicode``, ``int``, ``long``,
    ``float``, ``bool``, ``None``).

    ``ensure_ascii``: when ``False``, return a ``unicode`` instance
    instead of an ASCII-escaped ``str``.

    ``check_circular``: when ``False``, skip circular-reference checking
    for containers (a cycle then causes ``OverflowError`` or worse).

    ``allow_nan``: when ``False``, serializing ``nan``/``inf``/``-inf``
    raises ``ValueError`` instead of writing the JavaScript equivalents
    (``NaN``, ``Infinity``, ``-Infinity``).

    ``indent``: non-negative integer for pretty-printing (0 inserts only
    newlines); ``None`` is the most compact representation.

    ``separators``: an ``(item_separator, dict_separator)`` tuple
    overriding the default ``(', ', ': ')``; ``(',', ':')`` is the most
    compact.

    ``encoding``: character encoding for ``str`` instances (UTF-8 by
    default).

    ``default(obj)``: called for otherwise unserializable objects; must
    return a serializable version or raise ``TypeError``.

    ``cls``: a custom ``JSONEncoder`` subclass (e.g. one overriding
    ``.default()``).
    """
    use_cached_encoder = (skipkeys is False and ensure_ascii is True and
        check_circular is True and allow_nan is True and
        cls is None and indent is None and separators is None and
        encoding == 'utf-8' and default is None and not kw)
    if use_cached_encoder:
        # Common case: reuse the shared module-level encoder.
        return _default_encoder.encode(obj)
    encoder_cls = cls or JSONEncoder
    encoder = encoder_cls(skipkeys=skipkeys, ensure_ascii=ensure_ascii,
        check_circular=check_circular, allow_nan=allow_nan, indent=indent,
        separators=separators, encoding=encoding, default=default, **kw)
    return encoder.encode(obj)
# Shared decoder instance used by load()/loads() when no overrides are given.
_default_decoder = JSONDecoder(encoding=None, object_hook=None)
def load(fp, encoding=None, cls=None, object_hook=None, parse_float=None,
        parse_int=None, parse_constant=None, **kw):
    """Deserialize ``fp`` (a ``.read()``-supporting file-like object
    containing a JSON document) to a Python object.

    Reads the whole stream and delegates to :func:`loads`; see that
    function for the meaning of every keyword argument.

    ``encoding`` names an ASCII-superset encoding for ``str`` content
    other than utf-8 (e.g. latin-1).  Non-ASCII-based encodings (such as
    UCS-2) are not allowed; wrap the stream with
    ``codecs.getreader(fp)(encoding)`` or decode to ``unicode`` first.

    ``object_hook`` is called with every decoded JSON object (``dict``)
    and its return value is used instead — useful for custom decoders
    (e.g. JSON-RPC class hinting).

    ``cls`` selects a custom ``JSONDecoder`` subclass.
    """
    document = fp.read()
    return loads(document,
        encoding=encoding, cls=cls, object_hook=object_hook,
        parse_float=parse_float, parse_int=parse_int,
        parse_constant=parse_constant, **kw)
def loads(s, encoding=None, cls=None, object_hook=None, parse_float=None,
        parse_int=None, parse_constant=None, **kw):
    """Deserialize ``s`` (a ``str`` or ``unicode`` instance containing a
    JSON document) to a Python object.

    ``encoding`` names an ASCII-superset encoding for ``str`` input other
    than utf-8 (e.g. latin-1).  Non-ASCII-based encodings (such as UCS-2)
    are not allowed; decode to ``unicode`` first.

    ``object_hook`` is called with every decoded JSON object (``dict``)
    and its return value is used instead — useful for custom decoders
    (e.g. JSON-RPC class hinting).

    ``parse_float`` / ``parse_int`` are called with the string of every
    JSON float / int; they default to ``float`` / ``int`` and can
    substitute other datatypes (e.g. ``decimal.Decimal``).

    ``parse_constant`` is called with one of ``-Infinity``, ``Infinity``,
    ``NaN``, ``null``, ``true``, ``false`` — e.g. to raise on invalid
    JSON numbers.

    ``cls`` selects a custom ``JSONDecoder`` subclass.
    """
    if (cls is None and encoding is None and object_hook is None and
            parse_int is None and parse_float is None and
            parse_constant is None and not kw):
        # Common case: every option at its default — reuse the shared decoder.
        return _default_decoder.decode(s)
    decoder_cls = cls or JSONDecoder
    # Forward only the overrides the caller actually supplied.
    for name, value in (('object_hook', object_hook),
                        ('parse_float', parse_float),
                        ('parse_int', parse_int),
                        ('parse_constant', parse_constant)):
        if value is not None:
            kw[name] = value
    return decoder_cls(encoding=encoding, **kw).decode(s)
| Python |
r"""Using simplejson from the shell to validate and
pretty-print::
$ echo '{"json":"obj"}' | python -msimplejson.tool
{
"json": "obj"
}
$ echo '{ 1.2:3.4}' | python -msimplejson.tool
Expecting property name: line 1 column 2 (char 2)
"""
import simplejson
def main():
    # Read JSON from a file or stdin, validate it, and pretty-print it
    # (sorted keys, 4-space indent) to a file or stdout.
    # Usage: tool.py [infile [outfile]]
    import sys
    if len(sys.argv) == 1:
        infile = sys.stdin
        outfile = sys.stdout
    elif len(sys.argv) == 2:
        infile = open(sys.argv[1], 'rb')
        outfile = sys.stdout
    elif len(sys.argv) == 3:
        infile = open(sys.argv[1], 'rb')
        outfile = open(sys.argv[2], 'wb')
    else:
        raise SystemExit("%s [infile [outfile]]" % (sys.argv[0],))
    try:
        obj = simplejson.load(infile)
    except ValueError, e:
        # Python 2 except syntax; surface the parse error as the exit message.
        raise SystemExit(e)
    simplejson.dump(obj, outfile, sort_keys=True, indent=4)
    outfile.write('\n')

if __name__ == '__main__':
    main()
| Python |
"""JSON token scanner
"""
import re
try:
from simplejson._speedups import make_scanner as c_make_scanner
except ImportError:
c_make_scanner = None
__all__ = ['make_scanner']
# JSON number grammar: integer part (no leading zeros), optional
# fraction, optional exponent.
NUMBER_RE = re.compile(
    r'(-?(?:0|[1-9]\d*))(\.\d+)?([eE][-+]?\d+)?',
    (re.VERBOSE | re.MULTILINE | re.DOTALL))
def py_make_scanner(context):
    """Build the ``scan_once(string, idx)`` callable for *context*.

    ``scan_once`` decodes one JSON value starting at index ``idx`` and
    returns ``(value, end_index)``; it raises ``StopIteration`` when no
    value can be parsed at that position.  Pure-Python fallback for the
    _speedups C implementation.
    """
    # Bind everything the closure needs to locals once, up front.
    parse_object = context.parse_object
    parse_array = context.parse_array
    parse_string = context.parse_string
    match_number = NUMBER_RE.match
    encoding = context.encoding
    strict = context.strict
    parse_float = context.parse_float
    parse_int = context.parse_int
    parse_constant = context.parse_constant
    object_hook = context.object_hook

    def scan_value(string, idx):
        try:
            ch = string[idx]
        except IndexError:
            raise StopIteration
        if ch == '"':
            return parse_string(string, idx + 1, encoding, strict)
        if ch == '{':
            return parse_object((string, idx + 1), encoding, strict,
                scan_value, object_hook)
        if ch == '[':
            return parse_array((string, idx + 1), scan_value)
        if ch == 'n' and string[idx:idx + 4] == 'null':
            return None, idx + 4
        if ch == 't' and string[idx:idx + 4] == 'true':
            return True, idx + 4
        if ch == 'f' and string[idx:idx + 5] == 'false':
            return False, idx + 5
        # Numbers are tried before the non-standard constants, matching
        # the original dispatch order.
        m = match_number(string, idx)
        if m is not None:
            integer, frac, exp = m.groups()
            if frac or exp:
                # A fractional part or an exponent makes it a float.
                value = parse_float(integer + (frac or '') + (exp or ''))
            else:
                value = parse_int(integer)
            return value, m.end()
        if ch == 'N' and string[idx:idx + 3] == 'NaN':
            return parse_constant('NaN'), idx + 3
        if ch == 'I' and string[idx:idx + 8] == 'Infinity':
            return parse_constant('Infinity'), idx + 8
        if ch == '-' and string[idx:idx + 9] == '-Infinity':
            return parse_constant('-Infinity'), idx + 9
        raise StopIteration

    return scan_value
# Prefer the C-accelerated scanner from _speedups when the extension built;
# fall back to the pure-Python implementation otherwise.
make_scanner = c_make_scanner or py_make_scanner
| Python |
#!/usr/bin/python2.4
'''Load the latest update for a Twitter user and leave it in an XHTML fragment'''
__author__ = 'dewitt@google.com'
import codecs
import getopt
import sys
import twitter
TEMPLATE = """
<div class="twitter">
<span class="twitter-user"><a href="http://twitter.com/%s">Twitter</a>: </span>
<span class="twitter-text">%s</span>
<span class="twitter-relative-created-at"><a href="http://twitter.com/%s/statuses/%s">Posted %s</a></span>
</div>
"""
def Usage():
print 'Usage: %s [options] twitterid' % __file__
print
print ' This script fetches a users latest twitter update and stores'
print ' the result in a file as an XHTML fragment'
print
print ' Options:'
print ' --help -h : print this help'
print ' --output : the output file [default: stdout]'
def FetchTwitter(user, output):
    """Fetch *user*'s most recent status and render it through TEMPLATE.

    Args:
        user: twitter screen name to fetch; must be non-empty.
        output: destination filename; when falsy the XHTML fragment is
            printed to stdout instead of being saved.
    """
    assert user
    # Only the single most recent status is needed.
    statuses = twitter.Api().GetUserTimeline(user=user, count=1)
    s = statuses[0]
    xhtml = TEMPLATE % (s.user.screen_name, s.text, s.user.screen_name, s.id, s.relative_created_at)
    if output:
        Save(xhtml, output)
    else:
        print xhtml
def Save(xhtml, output):
    """Write *xhtml* to the file named *output* as ASCII.

    Characters outside the ASCII range are emitted as XML character
    references (&#NNN;) so the saved fragment stays 7-bit clean.
    """
    sink = codecs.open(output, mode='w', encoding='ascii',
                       errors='xmlcharrefreplace')
    sink.write(xhtml)
    sink.close()
def main():
    """Parse command-line options and fetch the user's latest status.

    Exits with status 2 on usage errors (bad flags or missing twitterid).
    """
    try:
        # 'ho:' (not 'ho'): the short form of --output takes an argument,
        # matching the long spec 'output='.  The original 'ho' made -o
        # consume no value, so `-o file` left output empty and shifted
        # 'file' into the positional arguments.
        opts, args = getopt.gnu_getopt(sys.argv[1:], 'ho:', ['help', 'output='])
    except getopt.GetoptError:
        Usage()
        sys.exit(2)
    try:
        user = args[0]
    except IndexError:
        # The required twitterid positional argument is missing; the
        # original bare `except:` also hid unrelated failures.
        Usage()
        sys.exit(2)
    output = None
    for o, a in opts:
        if o in ("-h", "--help"):
            Usage()
            sys.exit(2)
        if o in ("-o", "--output"):
            output = a
    FetchTwitter(user, output)

if __name__ == "__main__":
    main()
| Python |
#!/usr/bin/python2.4
'''Post a message to twitter'''
__author__ = 'dewitt@google.com'
import ConfigParser
import getopt
import os
import sys
import twitter
# Command-line help text, printed verbatim by PrintUsageAndExit().
USAGE = '''Usage: tweet [options] message
This script posts a message to Twitter.
Options:
-h --help : print this help
--consumer-key : the twitter consumer key
--consumer-secret : the twitter consumer secret
--access-key : the twitter access token key
--access-secret : the twitter access token secret
--encoding : the character set encoding used in input strings, e.g. "utf-8". [optional]
Documentation:
If either of the command line flags are not present, the environment
variables TWEETUSERNAME and TWEETPASSWORD will then be checked for your
consumer_key or consumer_secret, respectively.
If neither the command line flags nor the enviroment variables are
present, the .tweetrc file, if it exists, can be used to set the
default consumer_key and consumer_secret. The file should contain the
following three lines, replacing *consumer_key* with your consumer key, and
*consumer_secret* with your consumer secret:
A skeletal .tweetrc file:
[Tweet]
consumer_key: *consumer_key*
consumer_secret: *consumer_password*
access_key: *access_key*
access_secret: *access_password*
'''

def PrintUsageAndExit():
    """Print the usage message and terminate with exit status 2."""
    print USAGE
    sys.exit(2)
def GetConsumerKeyEnv():
    """Consumer key from the TWEETUSERNAME environment variable, or None."""
    return os.environ.get("TWEETUSERNAME")

def GetConsumerSecretEnv():
    """Consumer secret from the TWEETPASSWORD environment variable, or None."""
    return os.environ.get("TWEETPASSWORD")

def GetAccessKeyEnv():
    """Access token key from the TWEETACCESSKEY environment variable, or None."""
    return os.environ.get("TWEETACCESSKEY")

def GetAccessSecretEnv():
    """Access token secret from the TWEETACCESSSECRET environment variable, or None."""
    return os.environ.get("TWEETACCESSSECRET")
class TweetRc(object):
    """Reads tweet credentials from the ~/.tweetrc config file.

    The file is parsed lazily on first access and cached for the lifetime
    of the instance.  Expected format is an INI file with a single [Tweet]
    section containing consumer_key, consumer_secret, access_key and
    access_secret options; every accessor degrades to None when the file,
    section or option is missing.
    """
    def __init__(self):
        # Parsed ConfigParser instance, created lazily by _GetConfig().
        self._config = None
    def GetConsumerKey(self):
        """Return the consumer_key option, or None."""
        return self._GetOption('consumer_key')
    def GetConsumerSecret(self):
        """Return the consumer_secret option, or None."""
        return self._GetOption('consumer_secret')
    def GetAccessKey(self):
        """Return the access_key option, or None."""
        return self._GetOption('access_key')
    def GetAccessSecret(self):
        """Return the access_secret option, or None."""
        return self._GetOption('access_secret')
    def _GetOption(self, option):
        """Return *option* from the [Tweet] section, or None if unavailable."""
        try:
            return self._GetConfig().get('Tweet', option)
        except Exception:
            # Was a bare `except:`, which also swallowed SystemExit and
            # KeyboardInterrupt.  Exception preserves the best-effort
            # contract (missing file/section/option -> None) without
            # hiding interpreter-exit signals.
            return None
    def _GetConfig(self):
        """Parse ~/.tweetrc on first use and cache the parser."""
        if not self._config:
            self._config = ConfigParser.ConfigParser()
            self._config.read(os.path.expanduser('~/.tweetrc'))
        return self._config
def main():
try:
shortflags = 'h'
longflags = ['help', 'consumer-key=', 'consumer-secret=',
'access-key=', 'access-secret=', 'encoding=']
opts, args = getopt.gnu_getopt(sys.argv[1:], shortflags, longflags)
except getopt.GetoptError:
PrintUsageAndExit()
consumer_keyflag = None
consumer_secretflag = None
access_keyflag = None
access_secretflag = None
encoding = None
for o, a in opts:
if o in ("-h", "--help"):
PrintUsageAndExit()
if o in ("--consumer-key"):
consumer_keyflag = a
if o in ("--consumer-secret"):
consumer_secretflag = a
if o in ("--access-key"):
access_keyflag = a
if o in ("--access-secret"):
access_secretflag = a
if o in ("--encoding"):
encoding = a
message = ' '.join(args)
if not message:
PrintUsageAndExit()
rc = TweetRc()
consumer_key = consumer_keyflag or GetConsumerKeyEnv() or rc.GetConsumerKey()
consumer_secret = consumer_secretflag or GetConsumerSecretEnv() or rc.GetConsumerSecret()
access_key = access_keyflag or GetAccessKeyEnv() or rc.GetAccessKey()
access_secret = access_secretflag or GetAccessSecretEnv() or rc.GetAccessSecret()
if not consumer_key or not consumer_secret or not access_key or not access_secret:
PrintUsageAndExit()
api = twitter.Api(consumer_key=consumer_key, consumer_secret=consumer_secret,
access_token_key=access_key, access_token_secret=access_secret,
input_encoding=encoding)
try:
status = api.PostUpdate(message)
except UnicodeDecodeError:
print "Your message could not be encoded. Perhaps it contains non-ASCII characters? "
print "Try explicitly specifying the encoding with the --encoding flag"
sys.exit(2)
print "%s just posted: %s" % (status.user.name, status.text)
if __name__ == "__main__":
main()
| Python |
#!/usr/bin/python2.4
#
# Copyright 2007 The Python-Twitter Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''A class that defines the default URL Shortener.
TinyURL is provided as the default and as an example.
'''
import urllib
# Change History
#
# 2010-05-16
# TinyURL example and the idea for this comes from a bug filed by
# acolorado with patch provided by ghills. Class implementation
# was done by bear.
#
# Issue 19 http://code.google.com/p/python-twitter/issues/detail?id=19
#
class ShortenURL(object):
    '''Helper class to make URL Shortener calls if/when required'''
    def __init__(self,
                 userid=None,
                 password=None):
        '''Instantiate a new ShortenURL object

        Args:
            userid: userid for any required authorization call [optional]
            password: password for any required authorization call [optional]
        '''
        self.userid = userid
        self.password = password
    def Shorten(self,
                longURL):
        '''Call the TinyURL API and return the shortened URL result.

        Args:
            longURL: URL string to shorten

        Returns:
            The shortened URL as a string
        '''
        result = None
        # Percent-encode the target so URLs containing '&', '#', '?' or
        # spaces survive embedding in the query string.  The original
        # interpolated longURL verbatim, which truncated it at the first
        # '&' on the TinyURL side.
        f = urllib.urlopen("http://tinyurl.com/api-create.php?url=%s"
                           % urllib.quote(longURL, safe=''))
        try:
            result = f.read()
        finally:
            f.close()
        return result
| Python |
#!/usr/bin/python2.4
#
# Copyright 2007 The Python-Twitter Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''A library that provides a Python interface to the Twitter API'''
__author__ = 'python-twitter@googlegroups.com'
__version__ = '0.8.3'
import base64
import calendar
import datetime
import httplib
import os
import rfc822
import sys
import tempfile
import textwrap
import time
import calendar
import urllib
import urllib2
import urlparse
import gzip
import StringIO
try:
# Python >= 2.6
import json as simplejson
except ImportError:
try:
# Python < 2.6
import simplejson
except ImportError:
try:
# Google App Engine
from django.utils import simplejson
except ImportError:
raise ImportError, "Unable to load a json library"
# parse_qsl moved to urlparse module in v2.6
try:
from urlparse import parse_qsl, parse_qs
except ImportError:
from cgi import parse_qsl, parse_qs
try:
from hashlib import md5
except ImportError:
from md5 import md5
import oauth2 as oauth
# Maximum length of a tweet as enforced by the Twitter service.
CHARACTER_LIMIT = 140

# A singleton representing a lazily instantiated FileCache.
DEFAULT_CACHE = object()

# OAuth 1.0a endpoints for the Twitter API.
REQUEST_TOKEN_URL = 'https://api.twitter.com/oauth/request_token'
ACCESS_TOKEN_URL = 'https://api.twitter.com/oauth/access_token'
AUTHORIZATION_URL = 'https://api.twitter.com/oauth/authorize'
SIGNIN_URL = 'https://api.twitter.com/oauth/authenticate'
class TwitterError(Exception):
    """Base exception for all errors raised by this library."""
    @property
    def message(self):
        """The first positional argument this error was constructed with."""
        return self.args[0]
class Status(object):
    '''A class representing the Status structure used by the twitter API.

    The Status structure exposes the following properties:

      status.created_at
      status.created_at_in_seconds  # read only
      status.favorited
      status.in_reply_to_screen_name
      status.in_reply_to_user_id
      status.in_reply_to_status_id
      status.truncated
      status.source
      status.id
      status.text
      status.location
      status.relative_created_at  # read only
      status.user
      status.urls
      status.user_mentions
      status.hashtags
      status.geo
      status.place
      status.coordinates
      status.contributors
    '''
    def __init__(self,
                 created_at=None,
                 favorited=None,
                 id=None,
                 text=None,
                 location=None,
                 user=None,
                 in_reply_to_screen_name=None,
                 in_reply_to_user_id=None,
                 in_reply_to_status_id=None,
                 truncated=None,
                 source=None,
                 now=None,
                 urls=None,
                 user_mentions=None,
                 hashtags=None,
                 geo=None,
                 place=None,
                 coordinates=None,
                 contributors=None,
                 retweeted=None,
                 retweeted_status=None,
                 retweet_count=None):
        '''An object to hold a Twitter status message.

        This class is normally instantiated by the twitter.Api class and
        returned in a sequence.

        Note: Dates are posted in the form "Sat Jan 27 04:17:38 +0000 2007"

        Args:
          created_at:
            The time this status message was posted. [Optional]
          favorited:
            Whether this is a favorite of the authenticated user. [Optional]
          id:
            The unique id of this status message. [Optional]
          text:
            The text of this status message. [Optional]
          location:
            the geolocation string associated with this message. [Optional]
          user:
            A twitter.User instance representing the person posting the
            message. [Optional]
          now:
            The current time, if the client choses to set it.
            Defaults to the wall clock time. [Optional]
          urls:
            Collection of parsed Url entities from the message. [Optional]
          user_mentions:
            Collection of parsed User mention entities. [Optional]
          hashtags:
            Collection of parsed Hashtag entities. [Optional]
          geo:
            Geo data for the message, as delivered by the API. [Optional]
          place:
            Place data for the message, as delivered by the API. [Optional]
          coordinates:
            Coordinates for the message, as delivered by the API. [Optional]
          contributors:
            Contributors data, as delivered by the API. [Optional]
          retweeted:
            Whether this status was retweeted. [Optional]
          retweeted_status:
            The original twitter.Status when this status is a retweet. [Optional]
          retweet_count:
            Number of times this status was retweeted. [Optional]
        '''
        # All of these assignments route through the properties below,
        # which store the values on underscore-prefixed attributes.
        self.created_at = created_at
        self.favorited = favorited
        self.id = id
        self.text = text
        self.location = location
        self.user = user
        self.now = now
        self.in_reply_to_screen_name = in_reply_to_screen_name
        self.in_reply_to_user_id = in_reply_to_user_id
        self.in_reply_to_status_id = in_reply_to_status_id
        self.truncated = truncated
        self.retweeted = retweeted
        self.source = source
        # urls, user_mentions and hashtags are plain attributes (no property).
        self.urls = urls
        self.user_mentions = user_mentions
        self.hashtags = hashtags
        self.geo = geo
        self.place = place
        self.coordinates = coordinates
        self.contributors = contributors
        self.retweeted_status = retweeted_status
        self.retweet_count = retweet_count

    def GetCreatedAt(self):
        '''Get the time this status message was posted.

        Returns:
          The time this status message was posted
        '''
        return self._created_at

    def SetCreatedAt(self, created_at):
        '''Set the time this status message was posted.

        Args:
          created_at:
            The time this status message was created
        '''
        self._created_at = created_at

    created_at = property(GetCreatedAt, SetCreatedAt,
                          doc='The time this status message was posted.')

    def GetCreatedAtInSeconds(self):
        '''Get the time this status message was posted, in seconds since the epoch.

        Returns:
          The time this status message was posted, in seconds since the epoch.
        '''
        # created_at is an RFC822-style date string; rfc822.parsedate yields
        # a UTC struct_time that calendar.timegm turns into epoch seconds.
        return calendar.timegm(rfc822.parsedate(self.created_at))

    created_at_in_seconds = property(GetCreatedAtInSeconds,
                                     doc="The time this status message was "
                                         "posted, in seconds since the epoch")

    def GetFavorited(self):
        '''Get the favorited setting of this status message.

        Returns:
          True if this status message is favorited; False otherwise
        '''
        return self._favorited

    def SetFavorited(self, favorited):
        '''Set the favorited state of this status message.

        Args:
          favorited:
            boolean True/False favorited state of this status message
        '''
        self._favorited = favorited

    favorited = property(GetFavorited, SetFavorited,
                         doc='The favorited state of this status message.')

    def GetId(self):
        '''Get the unique id of this status message.

        Returns:
          The unique id of this status message
        '''
        return self._id

    def SetId(self, id):
        '''Set the unique id of this status message.

        Args:
          id:
            The unique id of this status message
        '''
        self._id = id

    id = property(GetId, SetId,
                  doc='The unique id of this status message.')

    # --- reply-threading fields -------------------------------------------
    def GetInReplyToScreenName(self):
        '''Screen name of the user this status replies to, if any.'''
        return self._in_reply_to_screen_name

    def SetInReplyToScreenName(self, in_reply_to_screen_name):
        '''Set the screen name this status replies to.'''
        self._in_reply_to_screen_name = in_reply_to_screen_name

    in_reply_to_screen_name = property(GetInReplyToScreenName, SetInReplyToScreenName,
                                       doc='')

    def GetInReplyToUserId(self):
        '''Id of the user this status replies to, if any.'''
        return self._in_reply_to_user_id

    def SetInReplyToUserId(self, in_reply_to_user_id):
        '''Set the user id this status replies to.'''
        self._in_reply_to_user_id = in_reply_to_user_id

    in_reply_to_user_id = property(GetInReplyToUserId, SetInReplyToUserId,
                                   doc='')

    def GetInReplyToStatusId(self):
        '''Id of the status this status replies to, if any.'''
        return self._in_reply_to_status_id

    def SetInReplyToStatusId(self, in_reply_to_status_id):
        '''Set the status id this status replies to.'''
        self._in_reply_to_status_id = in_reply_to_status_id

    in_reply_to_status_id = property(GetInReplyToStatusId, SetInReplyToStatusId,
                                     doc='')

    def GetTruncated(self):
        '''Whether the API truncated the status text.'''
        return self._truncated

    def SetTruncated(self, truncated):
        '''Set the truncated flag for this status.'''
        self._truncated = truncated

    truncated = property(GetTruncated, SetTruncated,
                         doc='')

    def GetRetweeted(self):
        '''The retweeted flag delivered by the API.'''
        return self._retweeted

    def SetRetweeted(self, retweeted):
        '''Set the retweeted flag for this status.'''
        self._retweeted = retweeted

    retweeted = property(GetRetweeted, SetRetweeted,
                         doc='')

    def GetSource(self):
        '''The client/source string this status was posted from.'''
        return self._source

    def SetSource(self, source):
        '''Set the source string of this status.'''
        self._source = source

    source = property(GetSource, SetSource,
                      doc='')

    def GetText(self):
        '''Get the text of this status message.

        Returns:
          The text of this status message.
        '''
        return self._text

    def SetText(self, text):
        '''Set the text of this status message.

        Args:
          text:
            The text of this status message
        '''
        self._text = text

    text = property(GetText, SetText,
                    doc='The text of this status message')

    def GetLocation(self):
        '''Get the geolocation associated with this status message

        Returns:
          The geolocation string of this status message.
        '''
        return self._location

    def SetLocation(self, location):
        '''Set the geolocation associated with this status message

        Args:
          location:
            The geolocation string of this status message
        '''
        self._location = location

    location = property(GetLocation, SetLocation,
                        doc='The geolocation string of this status message')

    def GetRelativeCreatedAt(self):
        '''Get a human readable string representing the posting time

        Returns:
          A human readable string representing the posting time
        '''
        # fudge softens bucket boundaries so values near a threshold round
        # to the friendlier phrase (e.g. 59s still reads "about a minute").
        fudge = 1.25
        delta = long(self.now) - long(self.created_at_in_seconds)
        if delta < (1 * fudge):
            return 'about a second ago'
        elif delta < (60 * (1/fudge)):
            return 'about %d seconds ago' % (delta)
        elif delta < (60 * fudge):
            return 'about a minute ago'
        elif delta < (60 * 60 * (1/fudge)):
            return 'about %d minutes ago' % (delta / 60)
        elif delta < (60 * 60 * fudge) or delta / (60 * 60) == 1:
            return 'about an hour ago'
        elif delta < (60 * 60 * 24 * (1/fudge)):
            return 'about %d hours ago' % (delta / (60 * 60))
        elif delta < (60 * 60 * 24 * fudge) or delta / (60 * 60 * 24) == 1:
            return 'about a day ago'
        else:
            return 'about %d days ago' % (delta / (60 * 60 * 24))

    relative_created_at = property(GetRelativeCreatedAt,
                                   doc='Get a human readable string representing '
                                       'the posting time')

    def GetUser(self):
        '''Get a twitter.User representing the entity posting this status message.

        Returns:
          A twitter.User representing the entity posting this status message
        '''
        return self._user

    def SetUser(self, user):
        '''Set a twitter.User representing the entity posting this status message.

        Args:
          user:
            A twitter.User representing the entity posting this status message
        '''
        self._user = user

    user = property(GetUser, SetUser,
                    doc='A twitter.User reprenting the entity posting this '
                        'status message')

    def GetNow(self):
        '''Get the wallclock time for this status message.

        Used to calculate relative_created_at.  Defaults to the time
        the object was instantiated.

        Returns:
          Whatever the status instance believes the current time to be,
          in seconds since the epoch.
        '''
        # Lazily initialized: reading `now` before it was set snapshots
        # the current wall clock.
        if self._now is None:
            self._now = time.time()
        return self._now

    def SetNow(self, now):
        '''Set the wallclock time for this status message.

        Used to calculate relative_created_at.  Defaults to the time
        the object was instantiated.

        Args:
          now:
            The wallclock time for this instance.
        '''
        self._now = now

    now = property(GetNow, SetNow,
                   doc='The wallclock time for this status instance.')

    # --- raw geo/metadata payloads passed through from the API -----------
    def GetGeo(self):
        '''Geo payload for this status, as delivered by the API.'''
        return self._geo

    def SetGeo(self, geo):
        '''Set the geo payload for this status.'''
        self._geo = geo

    geo = property(GetGeo, SetGeo,
                   doc='')

    def GetPlace(self):
        '''Place payload for this status, as delivered by the API.'''
        return self._place

    def SetPlace(self, place):
        '''Set the place payload for this status.'''
        self._place = place

    place = property(GetPlace, SetPlace,
                     doc='')

    def GetCoordinates(self):
        '''Coordinates payload for this status, as delivered by the API.'''
        return self._coordinates

    def SetCoordinates(self, coordinates):
        '''Set the coordinates payload for this status.'''
        self._coordinates = coordinates

    coordinates = property(GetCoordinates, SetCoordinates,
                           doc='')

    def GetContributors(self):
        '''Contributors payload for this status, as delivered by the API.'''
        return self._contributors

    def SetContributors(self, contributors):
        '''Set the contributors payload for this status.'''
        self._contributors = contributors

    contributors = property(GetContributors, SetContributors,
                            doc='')

    def GetRetweeted_status(self):
        '''The original twitter.Status when this status is a retweet.'''
        return self._retweeted_status

    def SetRetweeted_status(self, retweeted_status):
        '''Set the original twitter.Status for a retweet.'''
        self._retweeted_status = retweeted_status

    retweeted_status = property(GetRetweeted_status, SetRetweeted_status,
                                doc='')

    def GetRetweetCount(self):
        '''Number of times this status has been retweeted.'''
        return self._retweet_count

    def SetRetweetCount(self, retweet_count):
        '''Set the retweet count for this status.'''
        self._retweet_count = retweet_count

    retweet_count = property(GetRetweetCount, SetRetweetCount,
                             doc='')

    def __ne__(self, other):
        return not self.__eq__(other)

    def __eq__(self, other):
        # NOTE(review): urls, user_mentions, hashtags and now are excluded
        # from the comparison.  A falsy `other` makes the chain return it
        # unchanged rather than an explicit False.
        try:
            return other and \
                   self.created_at == other.created_at and \
                   self.id == other.id and \
                   self.text == other.text and \
                   self.location == other.location and \
                   self.user == other.user and \
                   self.in_reply_to_screen_name == other.in_reply_to_screen_name and \
                   self.in_reply_to_user_id == other.in_reply_to_user_id and \
                   self.in_reply_to_status_id == other.in_reply_to_status_id and \
                   self.truncated == other.truncated and \
                   self.retweeted == other.retweeted and \
                   self.favorited == other.favorited and \
                   self.source == other.source and \
                   self.geo == other.geo and \
                   self.place == other.place and \
                   self.coordinates == other.coordinates and \
                   self.contributors == other.contributors and \
                   self.retweeted_status == other.retweeted_status and \
                   self.retweet_count == other.retweet_count
        except AttributeError:
            return False

    def __str__(self):
        '''A string representation of this twitter.Status instance.

        The return value is the same as the JSON string representation.

        Returns:
          A string representation of this twitter.Status instance.
        '''
        return self.AsJsonString()

    def AsJsonString(self):
        '''A JSON string representation of this twitter.Status instance.

        Returns:
          A JSON string representation of this twitter.Status instance
        '''
        return simplejson.dumps(self.AsDict(), sort_keys=True)

    def AsDict(self):
        '''A dict representation of this twitter.Status instance.

        The return value uses the same key names as the JSON representation.

        Return:
          A dict representing this twitter.Status instance
        '''
        data = {}
        if self.created_at:
            data['created_at'] = self.created_at
        if self.favorited:
            data['favorited'] = self.favorited
        if self.id:
            data['id'] = self.id
        if self.text:
            data['text'] = self.text
        if self.location:
            data['location'] = self.location
        if self.user:
            data['user'] = self.user.AsDict()
        if self.in_reply_to_screen_name:
            data['in_reply_to_screen_name'] = self.in_reply_to_screen_name
        if self.in_reply_to_user_id:
            data['in_reply_to_user_id'] = self.in_reply_to_user_id
        if self.in_reply_to_status_id:
            data['in_reply_to_status_id'] = self.in_reply_to_status_id
        if self.truncated is not None:
            data['truncated'] = self.truncated
        if self.retweeted is not None:
            data['retweeted'] = self.retweeted
        if self.favorited is not None:
            # NOTE(review): 'favorited' is assigned twice (truthy check
            # above, not-None check here); this later assignment wins when
            # favorited is a non-None falsy value.
            data['favorited'] = self.favorited
        if self.source:
            data['source'] = self.source
        if self.geo:
            data['geo'] = self.geo
        if self.place:
            data['place'] = self.place
        if self.coordinates:
            data['coordinates'] = self.coordinates
        if self.contributors:
            data['contributors'] = self.contributors
        if self.hashtags:
            data['hashtags'] = [h.text for h in self.hashtags]
        if self.retweeted_status:
            data['retweeted_status'] = self.retweeted_status.AsDict()
        if self.retweet_count:
            data['retweet_count'] = self.retweet_count
        if self.urls:
            data['urls'] = dict([(url.url, url.expanded_url) for url in self.urls])
        if self.user_mentions:
            data['user_mentions'] = [um.AsDict() for um in self.user_mentions]
        return data

    @staticmethod
    def NewFromJsonDict(data):
        '''Create a new instance based on a JSON dict.

        Args:
          data: A JSON dict, as converted from the JSON in the twitter API

        Returns:
          A twitter.Status instance
        '''
        if 'user' in data:
            user = User.NewFromJsonDict(data['user'])
        else:
            user = None
        if 'retweeted_status' in data:
            # Recursively build the embedded original status of a retweet.
            retweeted_status = Status.NewFromJsonDict(data['retweeted_status'])
        else:
            retweeted_status = None
        urls = None
        user_mentions = None
        hashtags = None
        if 'entities' in data:
            if 'urls' in data['entities']:
                urls = [Url.NewFromJsonDict(u) for u in data['entities']['urls']]
            if 'user_mentions' in data['entities']:
                user_mentions = [User.NewFromJsonDict(u) for u in data['entities']['user_mentions']]
            if 'hashtags' in data['entities']:
                hashtags = [Hashtag.NewFromJsonDict(h) for h in data['entities']['hashtags']]
        return Status(created_at=data.get('created_at', None),
                      favorited=data.get('favorited', None),
                      id=data.get('id', None),
                      text=data.get('text', None),
                      location=data.get('location', None),
                      in_reply_to_screen_name=data.get('in_reply_to_screen_name', None),
                      in_reply_to_user_id=data.get('in_reply_to_user_id', None),
                      in_reply_to_status_id=data.get('in_reply_to_status_id', None),
                      truncated=data.get('truncated', None),
                      retweeted=data.get('retweeted', None),
                      source=data.get('source', None),
                      user=user,
                      urls=urls,
                      user_mentions=user_mentions,
                      hashtags=hashtags,
                      geo=data.get('geo', None),
                      place=data.get('place', None),
                      coordinates=data.get('coordinates', None),
                      contributors=data.get('contributors', None),
                      retweeted_status=retweeted_status,
                      retweet_count=data.get('retweet_count', None))
class User(object):
'''A class representing the User structure used by the twitter API.
The User structure exposes the following properties:
user.id
user.name
user.screen_name
user.location
user.description
user.profile_image_url
user.profile_background_tile
user.profile_background_image_url
user.profile_sidebar_fill_color
user.profile_background_color
user.profile_link_color
user.profile_text_color
user.protected
user.utc_offset
user.time_zone
user.url
user.status
user.statuses_count
user.followers_count
user.friends_count
user.favourites_count
user.geo_enabled
user.verified
user.lang
user.notifications
user.contributors_enabled
user.created_at
user.listed_count
'''
def __init__(self,
id=None,
name=None,
screen_name=None,
location=None,
description=None,
profile_image_url=None,
profile_background_tile=None,
profile_background_image_url=None,
profile_sidebar_fill_color=None,
profile_background_color=None,
profile_link_color=None,
profile_text_color=None,
protected=None,
utc_offset=None,
time_zone=None,
followers_count=None,
friends_count=None,
statuses_count=None,
favourites_count=None,
url=None,
status=None,
geo_enabled=None,
verified=None,
lang=None,
notifications=None,
contributors_enabled=None,
created_at=None,
listed_count=None):
self.id = id
self.name = name
self.screen_name = screen_name
self.location = location
self.description = description
self.profile_image_url = profile_image_url
self.profile_background_tile = profile_background_tile
self.profile_background_image_url = profile_background_image_url
self.profile_sidebar_fill_color = profile_sidebar_fill_color
self.profile_background_color = profile_background_color
self.profile_link_color = profile_link_color
self.profile_text_color = profile_text_color
self.protected = protected
self.utc_offset = utc_offset
self.time_zone = time_zone
self.followers_count = followers_count
self.friends_count = friends_count
self.statuses_count = statuses_count
self.favourites_count = favourites_count
self.url = url
self.status = status
self.geo_enabled = geo_enabled
self.verified = verified
self.lang = lang
self.notifications = notifications
self.contributors_enabled = contributors_enabled
self.created_at = created_at
self.listed_count = listed_count
def GetId(self):
'''Get the unique id of this user.
Returns:
The unique id of this user
'''
return self._id
def SetId(self, id):
'''Set the unique id of this user.
Args:
id: The unique id of this user.
'''
self._id = id
id = property(GetId, SetId,
doc='The unique id of this user.')
def GetName(self):
'''Get the real name of this user.
Returns:
The real name of this user
'''
return self._name
def SetName(self, name):
'''Set the real name of this user.
Args:
name: The real name of this user
'''
self._name = name
name = property(GetName, SetName,
doc='The real name of this user.')
def GetScreenName(self):
'''Get the short twitter name of this user.
Returns:
The short twitter name of this user
'''
return self._screen_name
def SetScreenName(self, screen_name):
'''Set the short twitter name of this user.
Args:
screen_name: the short twitter name of this user
'''
self._screen_name = screen_name
screen_name = property(GetScreenName, SetScreenName,
doc='The short twitter name of this user.')
def GetLocation(self):
'''Get the geographic location of this user.
Returns:
The geographic location of this user
'''
return self._location
def SetLocation(self, location):
'''Set the geographic location of this user.
Args:
location: The geographic location of this user
'''
self._location = location
location = property(GetLocation, SetLocation,
doc='The geographic location of this user.')
def GetDescription(self):
'''Get the short text description of this user.
Returns:
The short text description of this user
'''
return self._description
def SetDescription(self, description):
'''Set the short text description of this user.
Args:
description: The short text description of this user
'''
self._description = description
description = property(GetDescription, SetDescription,
doc='The short text description of this user.')
def GetUrl(self):
'''Get the homepage url of this user.
Returns:
The homepage url of this user
'''
return self._url
def SetUrl(self, url):
'''Set the homepage url of this user.
Args:
url: The homepage url of this user
'''
self._url = url
url = property(GetUrl, SetUrl,
doc='The homepage url of this user.')
def GetProfileImageUrl(self):
'''Get the url of the thumbnail of this user.
Returns:
The url of the thumbnail of this user
'''
return self._profile_image_url
def SetProfileImageUrl(self, profile_image_url):
'''Set the url of the thumbnail of this user.
Args:
profile_image_url: The url of the thumbnail of this user
'''
self._profile_image_url = profile_image_url
profile_image_url= property(GetProfileImageUrl, SetProfileImageUrl,
doc='The url of the thumbnail of this user.')
def GetProfileBackgroundTile(self):
'''Boolean for whether to tile the profile background image.
Returns:
True if the background is to be tiled, False if not, None if unset.
'''
return self._profile_background_tile
def SetProfileBackgroundTile(self, profile_background_tile):
'''Set the boolean flag for whether to tile the profile background image.
Args:
profile_background_tile: Boolean flag for whether to tile or not.
'''
self._profile_background_tile = profile_background_tile
profile_background_tile = property(GetProfileBackgroundTile, SetProfileBackgroundTile,
doc='Boolean for whether to tile the background image.')
def GetProfileBackgroundImageUrl(self):
return self._profile_background_image_url
def SetProfileBackgroundImageUrl(self, profile_background_image_url):
self._profile_background_image_url = profile_background_image_url
profile_background_image_url = property(GetProfileBackgroundImageUrl, SetProfileBackgroundImageUrl,
doc='The url of the profile background of this user.')
def GetProfileSidebarFillColor(self):
return self._profile_sidebar_fill_color
def SetProfileSidebarFillColor(self, profile_sidebar_fill_color):
self._profile_sidebar_fill_color = profile_sidebar_fill_color
profile_sidebar_fill_color = property(GetProfileSidebarFillColor, SetProfileSidebarFillColor)
def GetProfileBackgroundColor(self):
return self._profile_background_color
def SetProfileBackgroundColor(self, profile_background_color):
self._profile_background_color = profile_background_color
profile_background_color = property(GetProfileBackgroundColor, SetProfileBackgroundColor)
def GetProfileLinkColor(self):
return self._profile_link_color
def SetProfileLinkColor(self, profile_link_color):
self._profile_link_color = profile_link_color
profile_link_color = property(GetProfileLinkColor, SetProfileLinkColor)
def GetProfileTextColor(self):
return self._profile_text_color
def SetProfileTextColor(self, profile_text_color):
self._profile_text_color = profile_text_color
profile_text_color = property(GetProfileTextColor, SetProfileTextColor)
def GetProtected(self):
return self._protected
def SetProtected(self, protected):
self._protected = protected
protected = property(GetProtected, SetProtected)
def GetUtcOffset(self):
return self._utc_offset
def SetUtcOffset(self, utc_offset):
self._utc_offset = utc_offset
utc_offset = property(GetUtcOffset, SetUtcOffset)
def GetTimeZone(self):
'''Returns the current time zone string for the user.
Returns:
The descriptive time zone string for the user.
'''
return self._time_zone
def SetTimeZone(self, time_zone):
'''Sets the user's time zone string.
Args:
time_zone:
The descriptive time zone to assign for the user.
'''
self._time_zone = time_zone
time_zone = property(GetTimeZone, SetTimeZone)
# NOTE(review): more public Get*/Set* accessor pairs backing the class's
# property objects; kept verbatim for API compatibility.
def GetStatus(self):
  '''Get the latest twitter.Status of this user.

  Returns:
    The latest twitter.Status of this user
  '''
  return self._status
def SetStatus(self, status):
  '''Set the latest twitter.Status of this user.

  Args:
    status:
      The latest twitter.Status of this user
  '''
  self._status = status
status = property(GetStatus, SetStatus,
                  doc='The latest twitter.Status of this user.')
def GetFriendsCount(self):
  '''Get the friend count for this user.

  Returns:
    The number of users this user has befriended.
  '''
  return self._friends_count
def SetFriendsCount(self, count):
  '''Set the friend count for this user.

  Args:
    count:
      The number of users this user has befriended.
  '''
  self._friends_count = count
friends_count = property(GetFriendsCount, SetFriendsCount,
                         doc='The number of friends for this user.')
def GetListedCount(self):
  '''Get the listed count for this user.

  Returns:
    The number of lists this user belongs to.
  '''
  return self._listed_count
def SetListedCount(self, count):
  '''Set the listed count for this user.

  Args:
    count:
      The number of lists this user belongs to.
  '''
  self._listed_count = count
listed_count = property(GetListedCount, SetListedCount,
                        doc='The number of lists this user belongs to.')
def GetFollowersCount(self):
  '''Get the follower count for this user.

  Returns:
    The number of users following this user.
  '''
  return self._followers_count
def SetFollowersCount(self, count):
  '''Set the follower count for this user.

  Args:
    count:
      The number of users following this user.
  '''
  self._followers_count = count
followers_count = property(GetFollowersCount, SetFollowersCount,
                           doc='The number of users following this user.')
def GetStatusesCount(self):
  '''Get the number of status updates for this user.

  Returns:
    The number of status updates for this user.
  '''
  return self._statuses_count
def SetStatusesCount(self, count):
  '''Set the status update count for this user.

  Args:
    count:
      The number of updates for this user.
  '''
  self._statuses_count = count
statuses_count = property(GetStatusesCount, SetStatusesCount,
                          doc='The number of updates for this user.')
def GetFavouritesCount(self):
  '''Get the number of favourites for this user.

  Returns:
    The number of favourites for this user.
  '''
  return self._favourites_count
def SetFavouritesCount(self, count):
  '''Set the favourite count for this user.

  Args:
    count:
      The number of favourites for this user.
  '''
  # NOTE: the British spelling "favourites" matches the Twitter API's
  # JSON key, so it is kept throughout.
  self._favourites_count = count
favourites_count = property(GetFavouritesCount, SetFavouritesCount,
                            doc='The number of favourites for this user.')
# NOTE(review): more public Get*/Set* accessor pairs backing the class's
# property objects; kept verbatim for API compatibility.
def GetGeoEnabled(self):
  '''Get the setting of geo_enabled for this user.

  Returns:
    True/False if Geo tagging is enabled
  '''
  return self._geo_enabled
def SetGeoEnabled(self, geo_enabled):
  '''Set the latest twitter.geo_enabled of this user.

  Args:
    geo_enabled:
      True/False if Geo tagging is to be enabled
  '''
  self._geo_enabled = geo_enabled
geo_enabled = property(GetGeoEnabled, SetGeoEnabled,
                       doc='The value of twitter.geo_enabled for this user.')
def GetVerified(self):
  '''Get the setting of verified for this user.

  Returns:
    True/False if user is a verified account
  '''
  return self._verified
def SetVerified(self, verified):
  '''Set twitter.verified for this user.

  Args:
    verified:
      True/False if user is a verified account
  '''
  self._verified = verified
verified = property(GetVerified, SetVerified,
                    doc='The value of twitter.verified for this user.')
def GetLang(self):
  '''Get the setting of lang for this user.

  Returns:
    language code of the user
  '''
  return self._lang
def SetLang(self, lang):
  '''Set twitter.lang for this user.

  Args:
    lang:
      language code for the user
  '''
  self._lang = lang
lang = property(GetLang, SetLang,
                doc='The value of twitter.lang for this user.')
def GetNotifications(self):
  '''Get the setting of notifications for this user.

  Returns:
    True/False for the notifications setting of the user
  '''
  return self._notifications
def SetNotifications(self, notifications):
  '''Set twitter.notifications for this user.

  Args:
    notifications:
      True/False notifications setting for the user
  '''
  self._notifications = notifications
notifications = property(GetNotifications, SetNotifications,
                         doc='The value of twitter.notifications for this user.')
def GetContributorsEnabled(self):
  '''Get the setting of contributors_enabled for this user.

  Returns:
    True/False contributors_enabled of the user
  '''
  return self._contributors_enabled
def SetContributorsEnabled(self, contributors_enabled):
  '''Set twitter.contributors_enabled for this user.

  Args:
    contributors_enabled:
      True/False contributors_enabled setting for the user
  '''
  self._contributors_enabled = contributors_enabled
contributors_enabled = property(GetContributorsEnabled, SetContributorsEnabled,
                                doc='The value of twitter.contributors_enabled for this user.')
def GetCreatedAt(self):
  '''Get the setting of created_at for this user.

  Returns:
    created_at value of the user
  '''
  return self._created_at
def SetCreatedAt(self, created_at):
  '''Set twitter.created_at for this user.

  Args:
    created_at:
      created_at value for the user
  '''
  self._created_at = created_at
created_at = property(GetCreatedAt, SetCreatedAt,
                      doc='The value of twitter.created_at for this user.')
def __ne__(self, other):
  # Defined strictly in terms of __eq__ so the two cannot diverge.
  return not self.__eq__(other)
def __eq__(self, other):
  # Field-by-field comparison of every serialized attribute.  If `other`
  # is falsy the chain short-circuits and returns it; if `other` lacks
  # any attribute, the AttributeError means "not equal".
  try:
    return other and \
           self.id == other.id and \
           self.name == other.name and \
           self.screen_name == other.screen_name and \
           self.location == other.location and \
           self.description == other.description and \
           self.profile_image_url == other.profile_image_url and \
           self.profile_background_tile == other.profile_background_tile and \
           self.profile_background_image_url == other.profile_background_image_url and \
           self.profile_sidebar_fill_color == other.profile_sidebar_fill_color and \
           self.profile_background_color == other.profile_background_color and \
           self.profile_link_color == other.profile_link_color and \
           self.profile_text_color == other.profile_text_color and \
           self.protected == other.protected and \
           self.utc_offset == other.utc_offset and \
           self.time_zone == other.time_zone and \
           self.url == other.url and \
           self.statuses_count == other.statuses_count and \
           self.followers_count == other.followers_count and \
           self.favourites_count == other.favourites_count and \
           self.friends_count == other.friends_count and \
           self.status == other.status and \
           self.geo_enabled == other.geo_enabled and \
           self.verified == other.verified and \
           self.lang == other.lang and \
           self.notifications == other.notifications and \
           self.contributors_enabled == other.contributors_enabled and \
           self.created_at == other.created_at and \
           self.listed_count == other.listed_count
  except AttributeError:
    return False
def __str__(self):
  '''A string representation of this twitter.User instance.

  The return value is the same as the JSON string representation.

  Returns:
    A string representation of this twitter.User instance.
  '''
  return self.AsJsonString()
def AsJsonString(self):
  '''A JSON string representation of this twitter.User instance.

  Returns:
    A JSON string representation of this twitter.User instance
  '''
  # simplejson is imported at module level; sort_keys makes the output
  # deterministic so serializations can be compared directly.
  return simplejson.dumps(self.AsDict(), sort_keys=True)
def AsDict(self):
'''A dict representation of this twitter.User instance.
The return value uses the same key names as the JSON representation.
Return:
A dict representing this twitter.User instance
'''
data = {}
if self.id:
data['id'] = self.id
if self.name:
data['name'] = self.name
if self.screen_name:
data['screen_name'] = self.screen_name
if self.location:
data['location'] = self.location
if self.description:
data['description'] = self.description
if self.profile_image_url:
data['profile_image_url'] = self.profile_image_url
if self.profile_background_tile is not None:
data['profile_background_tile'] = self.profile_background_tile
if self.profile_background_image_url:
data['profile_sidebar_fill_color'] = self.profile_background_image_url
if self.profile_background_color:
data['profile_background_color'] = self.profile_background_color
if self.profile_link_color:
data['profile_link_color'] = self.profile_link_color
if self.profile_text_color:
data['profile_text_color'] = self.profile_text_color
if self.protected is not None:
data['protected'] = self.protected
if self.utc_offset:
data['utc_offset'] = self.utc_offset
if self.time_zone:
data['time_zone'] = self.time_zone
if self.url:
data['url'] = self.url
if self.status:
data['status'] = self.status.AsDict()
if self.friends_count:
data['friends_count'] = self.friends_count
if self.followers_count:
data['followers_count'] = self.followers_count
if self.statuses_count:
data['statuses_count'] = self.statuses_count
if self.favourites_count:
data['favourites_count'] = self.favourites_count
if self.geo_enabled:
data['geo_enabled'] = self.geo_enabled
if self.verified:
data['verified'] = self.verified
if self.lang:
data['lang'] = self.lang
if self.notifications:
data['notifications'] = self.notifications
if self.contributors_enabled:
data['contributors_enabled'] = self.contributors_enabled
if self.created_at:
data['created_at'] = self.created_at
if self.listed_count:
data['listed_count'] = self.listed_count
return data
@staticmethod
def NewFromJsonDict(data):
  '''Create a new instance based on a JSON dict.

  Args:
    data:
      A JSON dict, as converted from the JSON in the twitter API

  Returns:
    A twitter.User instance
  '''
  # An embedded status, when present, is itself a JSON dict and is
  # promoted to a twitter.Status instance first.
  if 'status' in data:
    status = Status.NewFromJsonDict(data['status'])
  else:
    status = None
  # Every other field maps 1:1 from the JSON key to the constructor
  # keyword of the same name; missing keys default to None.
  return User(id=data.get('id', None),
              name=data.get('name', None),
              screen_name=data.get('screen_name', None),
              location=data.get('location', None),
              description=data.get('description', None),
              statuses_count=data.get('statuses_count', None),
              followers_count=data.get('followers_count', None),
              favourites_count=data.get('favourites_count', None),
              friends_count=data.get('friends_count', None),
              profile_image_url=data.get('profile_image_url', None),
              profile_background_tile = data.get('profile_background_tile', None),
              profile_background_image_url = data.get('profile_background_image_url', None),
              profile_sidebar_fill_color = data.get('profile_sidebar_fill_color', None),
              profile_background_color = data.get('profile_background_color', None),
              profile_link_color = data.get('profile_link_color', None),
              profile_text_color = data.get('profile_text_color', None),
              protected = data.get('protected', None),
              utc_offset = data.get('utc_offset', None),
              time_zone = data.get('time_zone', None),
              url=data.get('url', None),
              status=status,
              geo_enabled=data.get('geo_enabled', None),
              verified=data.get('verified', None),
              lang=data.get('lang', None),
              notifications=data.get('notifications', None),
              contributors_enabled=data.get('contributors_enabled', None),
              created_at=data.get('created_at', None),
              listed_count=data.get('listed_count', None))
class List(object):
  '''A class representing the List structure used by the twitter API.

  The List structure exposes the following properties:

    list.id
    list.name
    list.slug
    list.description
    list.full_name
    list.mode
    list.uri
    list.member_count
    list.subscriber_count
    list.following
    list.user
  '''
  def __init__(self,
               id=None,
               name=None,
               slug=None,
               description=None,
               full_name=None,
               mode=None,
               uri=None,
               member_count=None,
               subscriber_count=None,
               following=None,
               user=None):
    '''An object to hold a Twitter list.

    All arguments are optional and default to None; `user` is the
    twitter.User that owns the list.
    '''
    self.id = id
    self.name = name
    self.slug = slug
    self.description = description
    self.full_name = full_name
    self.mode = mode
    self.uri = uri
    self.member_count = member_count
    self.subscriber_count = subscriber_count
    self.following = following
    self.user = user
  def GetId(self):
    '''Get the unique id of this list.

    Returns:
      The unique id of this list
    '''
    return self._id
  def SetId(self, id):
    '''Set the unique id of this list.

    Args:
      id:
        The unique id of this list.
    '''
    self._id = id
  id = property(GetId, SetId,
                doc='The unique id of this list.')
  def GetName(self):
    '''Get the real name of this list.

    Returns:
      The real name of this list
    '''
    return self._name
  def SetName(self, name):
    '''Set the real name of this list.

    Args:
      name:
        The real name of this list
    '''
    self._name = name
  name = property(GetName, SetName,
                  doc='The real name of this list.')
  def GetSlug(self):
    '''Get the slug of this list.

    Returns:
      The slug of this list
    '''
    return self._slug
  def SetSlug(self, slug):
    '''Set the slug of this list.

    Args:
      slug:
        The slug of this list.
    '''
    self._slug = slug
  slug = property(GetSlug, SetSlug,
                  doc='The slug of this list.')
  def GetDescription(self):
    '''Get the description of this list.

    Returns:
      The description of this list
    '''
    return self._description
  def SetDescription(self, description):
    '''Set the description of this list.

    Args:
      description:
        The description of this list.
    '''
    self._description = description
  description = property(GetDescription, SetDescription,
                         doc='The description of this list.')
  def GetFull_name(self):
    '''Get the full_name of this list.

    Returns:
      The full_name of this list
    '''
    return self._full_name
  def SetFull_name(self, full_name):
    '''Set the full_name of this list.

    Args:
      full_name:
        The full_name of this list.
    '''
    self._full_name = full_name
  full_name = property(GetFull_name, SetFull_name,
                       doc='The full_name of this list.')
  def GetMode(self):
    '''Get the mode of this list.

    Returns:
      The mode of this list
    '''
    return self._mode
  def SetMode(self, mode):
    '''Set the mode of this list.

    Args:
      mode:
        The mode of this list.
    '''
    self._mode = mode
  mode = property(GetMode, SetMode,
                  doc='The mode of this list.')
  def GetUri(self):
    '''Get the uri of this list.

    Returns:
      The uri of this list
    '''
    return self._uri
  def SetUri(self, uri):
    '''Set the uri of this list.

    Args:
      uri:
        The uri of this list.
    '''
    self._uri = uri
  uri = property(GetUri, SetUri,
                 doc='The uri of this list.')
  def GetMember_count(self):
    '''Get the member_count of this list.

    Returns:
      The member_count of this list
    '''
    return self._member_count
  def SetMember_count(self, member_count):
    '''Set the member_count of this list.

    Args:
      member_count:
        The member_count of this list.
    '''
    self._member_count = member_count
  member_count = property(GetMember_count, SetMember_count,
                          doc='The member_count of this list.')
  def GetSubscriber_count(self):
    '''Get the subscriber_count of this list.

    Returns:
      The subscriber_count of this list
    '''
    return self._subscriber_count
  def SetSubscriber_count(self, subscriber_count):
    '''Set the subscriber_count of this list.

    Args:
      subscriber_count:
        The subscriber_count of this list.
    '''
    self._subscriber_count = subscriber_count
  subscriber_count = property(GetSubscriber_count, SetSubscriber_count,
                              doc='The subscriber_count of this list.')
  def GetFollowing(self):
    '''Get the following status of this list.

    Returns:
      The following status of this list
    '''
    return self._following
  def SetFollowing(self, following):
    '''Set the following status of this list.

    Args:
      following:
        The following of this list.
    '''
    self._following = following
  following = property(GetFollowing, SetFollowing,
                       doc='The following status of this list.')
  def GetUser(self):
    '''Get the user of this list.

    Returns:
      The owner of this list
    '''
    return self._user
  def SetUser(self, user):
    '''Set the user of this list.

    Args:
      user:
        The owner of this list.
    '''
    self._user = user
  user = property(GetUser, SetUser,
                  doc='The owner of this list.')
  def __ne__(self, other):
    return not self.__eq__(other)
  def __eq__(self, other):
    # Field-by-field comparison; a missing attribute on `other` means
    # "not equal".
    try:
      return other and \
             self.id == other.id and \
             self.name == other.name and \
             self.slug == other.slug and \
             self.description == other.description and \
             self.full_name == other.full_name and \
             self.mode == other.mode and \
             self.uri == other.uri and \
             self.member_count == other.member_count and \
             self.subscriber_count == other.subscriber_count and \
             self.following == other.following and \
             self.user == other.user
    except AttributeError:
      return False
  def __str__(self):
    '''A string representation of this twitter.List instance.

    The return value is the same as the JSON string representation.

    Returns:
      A string representation of this twitter.List instance.
    '''
    return self.AsJsonString()
  def AsJsonString(self):
    '''A JSON string representation of this twitter.List instance.

    Returns:
      A JSON string representation of this twitter.List instance
    '''
    return simplejson.dumps(self.AsDict(), sort_keys=True)
  def AsDict(self):
    '''A dict representation of this twitter.List instance.

    The return value uses the same key names as the JSON representation.

    Return:
      A dict representing this twitter.List instance
    '''
    data = {}
    if self.id:
      data['id'] = self.id
    if self.name:
      data['name'] = self.name
    if self.slug:
      data['slug'] = self.slug
    if self.description:
      data['description'] = self.description
    if self.full_name:
      data['full_name'] = self.full_name
    if self.mode:
      data['mode'] = self.mode
    if self.uri:
      data['uri'] = self.uri
    # Counts and booleans use an explicit None check so 0/False survive.
    if self.member_count is not None:
      data['member_count'] = self.member_count
    if self.subscriber_count is not None:
      data['subscriber_count'] = self.subscriber_count
    if self.following is not None:
      data['following'] = self.following
    if self.user is not None:
      # Bug fix: previously the raw twitter.User object was stored here,
      # which is not JSON-serializable (AsJsonString would fail).  Follow
      # the same pattern as User.AsDict's handling of the nested status.
      data['user'] = self.user.AsDict()
    return data
  @staticmethod
  def NewFromJsonDict(data):
    '''Create a new instance based on a JSON dict.

    Args:
      data:
        A JSON dict, as converted from the JSON in the twitter API

    Returns:
      A twitter.List instance
    '''
    if 'user' in data:
      user = User.NewFromJsonDict(data['user'])
    else:
      user = None
    return List(id=data.get('id', None),
                name=data.get('name', None),
                slug=data.get('slug', None),
                description=data.get('description', None),
                full_name=data.get('full_name', None),
                mode=data.get('mode', None),
                uri=data.get('uri', None),
                member_count=data.get('member_count', None),
                subscriber_count=data.get('subscriber_count', None),
                following=data.get('following', None),
                user=user)
class DirectMessage(object):
  '''A class representing the DirectMessage structure used by the twitter API.

  The DirectMessage structure exposes the following properties:

    direct_message.id
    direct_message.created_at
    direct_message.created_at_in_seconds # read only
    direct_message.sender_id
    direct_message.sender_screen_name
    direct_message.recipient_id
    direct_message.recipient_screen_name
    direct_message.text
  '''

  # Attribute names that participate in __eq__, AsDict and
  # NewFromJsonDict; identical to the JSON key names.
  _JSON_FIELDS = ('id', 'created_at', 'sender_id', 'sender_screen_name',
                  'recipient_id', 'recipient_screen_name', 'text')

  def __init__(self,
               id=None,
               created_at=None,
               sender_id=None,
               sender_screen_name=None,
               recipient_id=None,
               recipient_screen_name=None,
               text=None):
    '''An object to hold a Twitter direct message.

    This class is normally instantiated by the twitter.Api class and
    returned in a sequence.

    Note: Dates are posted in the form "Sat Jan 27 04:17:38 +0000 2007"

    Args:
      id:
        The unique id of this direct message. [Optional]
      created_at:
        The time this direct message was posted. [Optional]
      sender_id:
        The id of the twitter user that sent this message. [Optional]
      sender_screen_name:
        The name of the twitter user that sent this message. [Optional]
      recipient_id:
        The id of the twitter that received this message. [Optional]
      recipient_screen_name:
        The name of the twitter that received this message. [Optional]
      text:
        The text of this direct message. [Optional]
    '''
    self.id = id
    self.created_at = created_at
    self.sender_id = sender_id
    self.sender_screen_name = sender_screen_name
    self.recipient_id = recipient_id
    self.recipient_screen_name = recipient_screen_name
    self.text = text
  def GetId(self):
    '''Return the unique id of this direct message.'''
    return self._id
  def SetId(self, id):
    '''Assign the unique id of this direct message.'''
    self._id = id
  id = property(GetId, SetId,
                doc='The unique id of this direct message.')
  def GetCreatedAt(self):
    '''Return the time this direct message was posted.'''
    return self._created_at
  def SetCreatedAt(self, created_at):
    '''Assign the time this direct message was posted.'''
    self._created_at = created_at
  created_at = property(GetCreatedAt, SetCreatedAt,
                        doc='The time this direct message was posted.')
  def GetCreatedAtInSeconds(self):
    '''Return the posting time as seconds since the epoch.'''
    return calendar.timegm(rfc822.parsedate(self.created_at))
  created_at_in_seconds = property(GetCreatedAtInSeconds,
                                   doc="The time this direct message was "
                                       "posted, in seconds since the epoch")
  def GetSenderId(self):
    '''Return the unique sender id of this direct message.'''
    return self._sender_id
  def SetSenderId(self, sender_id):
    '''Assign the unique sender id of this direct message.'''
    self._sender_id = sender_id
  sender_id = property(GetSenderId, SetSenderId,
                       doc='The unique sender id of this direct message.')
  def GetSenderScreenName(self):
    '''Return the unique sender screen name of this direct message.'''
    return self._sender_screen_name
  def SetSenderScreenName(self, sender_screen_name):
    '''Assign the unique sender screen name of this direct message.'''
    self._sender_screen_name = sender_screen_name
  sender_screen_name = property(GetSenderScreenName, SetSenderScreenName,
                                doc='The unique sender screen name of this direct message.')
  def GetRecipientId(self):
    '''Return the unique recipient id of this direct message.'''
    return self._recipient_id
  def SetRecipientId(self, recipient_id):
    '''Assign the unique recipient id of this direct message.'''
    self._recipient_id = recipient_id
  recipient_id = property(GetRecipientId, SetRecipientId,
                          doc='The unique recipient id of this direct message.')
  def GetRecipientScreenName(self):
    '''Return the unique recipient screen name of this direct message.'''
    return self._recipient_screen_name
  def SetRecipientScreenName(self, recipient_screen_name):
    '''Assign the unique recipient screen name of this direct message.'''
    self._recipient_screen_name = recipient_screen_name
  recipient_screen_name = property(GetRecipientScreenName, SetRecipientScreenName,
                                   doc='The unique recipient screen name of this direct message.')
  def GetText(self):
    '''Return the text of this direct message.'''
    return self._text
  def SetText(self, text):
    '''Assign the text of this direct message.'''
    self._text = text
  text = property(GetText, SetText,
                  doc='The text of this direct message')
  def __ne__(self, other):
    return not self.__eq__(other)
  def __eq__(self, other):
    # Equal when every serialized field matches; a falsy `other` is
    # returned unchanged and a missing attribute means "not equal".
    try:
      return other and all(getattr(self, field) == getattr(other, field)
                           for field in DirectMessage._JSON_FIELDS)
    except AttributeError:
      return False
  def __str__(self):
    '''Return the JSON string representation of this instance.

    Identical to AsJsonString().
    '''
    return self.AsJsonString()
  def AsJsonString(self):
    '''Return a sorted-key JSON string representation of this instance.'''
    return simplejson.dumps(self.AsDict(), sort_keys=True)
  def AsDict(self):
    '''Return a dict representation of this instance.

    The keys match the JSON representation; fields with falsy values
    are omitted.
    '''
    data = {}
    for field in DirectMessage._JSON_FIELDS:
      value = getattr(self, field)
      if value:
        data[field] = value
    return data
  @staticmethod
  def NewFromJsonDict(data):
    '''Create a new instance based on a JSON dict.

    Args:
      data:
        A JSON dict, as converted from the JSON in the twitter API

    Returns:
      A twitter.DirectMessage instance
    '''
    kwargs = dict((field, data.get(field, None))
                  for field in DirectMessage._JSON_FIELDS)
    return DirectMessage(**kwargs)
class Hashtag(object):
  '''A class representing a twitter hashtag.'''
  def __init__(self,
               text=None):
    '''Create a hashtag holding the given text. [Optional]'''
    self.text = text
  @staticmethod
  def NewFromJsonDict(data):
    '''Create a new instance based on a JSON dict.

    Args:
      data:
        A JSON dict, as converted from the JSON in the twitter API

    Returns:
      A twitter.Hashtag instance
    '''
    return Hashtag(data.get('text', None))
class Trend(object):
  '''A class representing a trending topic.'''
  def __init__(self, name=None, query=None, timestamp=None):
    '''Create a trend with an optional name, query and timestamp.'''
    self.name = name
    self.query = query
    self.timestamp = timestamp
  def __str__(self):
    return 'Name: %s\nQuery: %s\nTimestamp: %s\n' % (self.name, self.query, self.timestamp)
  def __ne__(self, other):
    return not self.__eq__(other)
  def __eq__(self, other):
    # Equal when name, query and timestamp all match; a falsy `other`
    # is returned unchanged, a missing attribute means "not equal".
    try:
      return other and all(getattr(self, field) == getattr(other, field)
                           for field in ('name', 'query', 'timestamp'))
    except AttributeError:
      return False
  @staticmethod
  def NewFromJsonDict(data, timestamp=None):
    '''Create a new instance based on a JSON dict.

    Args:
      data:
        A JSON dict
      timestamp:
        Gets set as the timestamp property of the new object

    Returns:
      A twitter.Trend object
    '''
    return Trend(data.get('name', None),
                 data.get('query', None),
                 timestamp)
class Url(object):
  '''A class representing an URL contained in a tweet'''
  def __init__(self,
               url=None,
               expanded_url=None):
    '''Create a Url with the short form and its expanded form.'''
    self.url = url
    self.expanded_url = expanded_url
  @staticmethod
  def NewFromJsonDict(data):
    '''Create a new instance based on a JSON dict.

    Args:
      data:
        A JSON dict, as converted from the JSON in the twitter API

    Returns:
      A twitter.Url instance
    '''
    return Url(data.get('url', None),
               data.get('expanded_url', None))
class Api(object):
'''A python interface into the Twitter API
By default, the Api caches results for 1 minute.
Example usage:
To create an instance of the twitter.Api class, with no authentication:
>>> import twitter
>>> api = twitter.Api()
To fetch the most recently posted public twitter status messages:
>>> statuses = api.GetPublicTimeline()
>>> print [s.user.name for s in statuses]
[u'DeWitt', u'Kesuke Miyagi', u'ev', u'Buzz Andersen', u'Biz Stone'] #...
To fetch a single user's public status messages, where "user" is either
a Twitter "short name" or their user id.
>>> statuses = api.GetUserTimeline(user)
>>> print [s.text for s in statuses]
To use authentication, instantiate the twitter.Api class with a
consumer key and secret; and the oAuth key and secret:
>>> api = twitter.Api(consumer_key='twitter consumer key',
consumer_secret='twitter consumer secret',
access_token_key='the_key_given',
access_token_secret='the_key_secret')
To fetch your friends (after being authenticated):
>>> users = api.GetFriends()
>>> print [u.name for u in users]
To post a twitter status message (after being authenticated):
>>> status = api.PostUpdate('I love python-twitter!')
>>> print status.text
I love python-twitter!
There are many other methods, including:
>>> api.PostUpdates(status)
>>> api.PostDirectMessage(user, text)
>>> api.GetUser(user)
>>> api.GetReplies()
>>> api.GetUserTimeline(user)
>>> api.GetStatus(id)
>>> api.DestroyStatus(id)
>>> api.GetFriendsTimeline(user)
>>> api.GetFriends(user)
>>> api.GetFollowers()
>>> api.GetFeatured()
>>> api.GetDirectMessages()
>>> api.PostDirectMessage(user, text)
>>> api.DestroyDirectMessage(id)
>>> api.DestroyFriendship(user)
>>> api.CreateFriendship(user)
>>> api.GetUserByEmail(email)
>>> api.VerifyCredentials()
'''
DEFAULT_CACHE_TIMEOUT = 60 # cache for 1 minute
_API_REALM = 'Twitter API'
def __init__(self,
             consumer_key=None,
             consumer_secret=None,
             access_token_key=None,
             access_token_secret=None,
             input_encoding=None,
             request_headers=None,
             cache=DEFAULT_CACHE,
             shortner=None,
             base_url=None,
             use_gzip_compression=False,
             debugHTTP=False):
  '''Instantiate a new twitter.Api object.

  Args:
    consumer_key:
      Your Twitter user's consumer_key.
    consumer_secret:
      Your Twitter user's consumer_secret.
    access_token_key:
      The oAuth access token key value you retrieved
      from running get_access_token.py.
    access_token_secret:
      The oAuth access token's secret, also retrieved
      from the get_access_token.py run.
    input_encoding:
      The encoding used to encode input strings. [Optional]
    request_header:
      A dictionary of additional HTTP request headers. [Optional]
    cache:
      The cache instance to use. Defaults to DEFAULT_CACHE.
      Use None to disable caching. [Optional]
    shortner:
      The shortner instance to use. Defaults to None.
      See shorten_url.py for an example shortner. [Optional]
    base_url:
      The base URL to use to contact the Twitter API.
      Defaults to https://api.twitter.com. [Optional]
    use_gzip_compression:
      Set to True to tell enable gzip compression for any call
      made to Twitter. Defaults to False. [Optional]
    debugHTTP:
      Set to True to enable debug output from urllib2 when performing
      any HTTP requests. Defaults to False. [Optional]
  '''
  self.SetCache(cache)
  self._urllib = urllib2
  self._cache_timeout = Api.DEFAULT_CACHE_TIMEOUT
  self._input_encoding = input_encoding
  self._use_gzip = use_gzip_compression
  self._debugHTTP = debugHTTP
  self._oauth_consumer = None
  # NOTE(review): 19 is presumably the length of a t.co-shortened
  # link at the time of writing -- confirm before relying on it.
  self._shortlink_size = 19
  self._InitializeRequestHeaders(request_headers)
  self._InitializeUserAgent()
  self._InitializeDefaultParameters()
  if base_url is None:
    self.base_url = 'https://api.twitter.com/1'
  else:
    self.base_url = base_url
  # A consumer key without a full set of access tokens is unusable:
  # warn on stderr and refuse to construct the Api object.
  if consumer_key is not None and (access_token_key is None or
                                   access_token_secret is None):
    print >> sys.stderr, 'Twitter now requires an oAuth Access Token for API calls.'
    print >> sys.stderr, 'If your using this library from a command line utility, please'
    print >> sys.stderr, 'run the the included get_access_token.py tool to generate one.'
    raise TwitterError('Twitter requires oAuth Access Token for all API access')
  self.SetCredentials(consumer_key, consumer_secret, access_token_key, access_token_secret)
def SetCredentials(self,
                   consumer_key,
                   consumer_secret,
                   access_token_key=None,
                   access_token_secret=None):
  '''Set the consumer_key and consumer_secret for this instance

  Args:
    consumer_key:
      The consumer_key of the twitter account.
    consumer_secret:
      The consumer_secret for the twitter account.
    access_token_key:
      The oAuth access token key value you retrieved
      from running get_access_token.py.
    access_token_secret:
      The oAuth access token's secret, also retrieved
      from the get_access_token.py run.
  '''
  self._consumer_key = consumer_key
  self._consumer_secret = consumer_secret
  self._access_token_key = access_token_key
  self._access_token_secret = access_token_secret
  self._oauth_consumer = None
  # Only build the oAuth machinery when the full credential set is
  # present; otherwise the instance stays unauthenticated.
  credentials = (consumer_key, consumer_secret,
                 access_token_key, access_token_secret)
  if all(cred is not None for cred in credentials):
    self._signature_method_plaintext = oauth.SignatureMethod_PLAINTEXT()
    self._signature_method_hmac_sha1 = oauth.SignatureMethod_HMAC_SHA1()
    self._oauth_token = oauth.Token(key=access_token_key, secret=access_token_secret)
    self._oauth_consumer = oauth.Consumer(key=consumer_key, secret=consumer_secret)
def ClearCredentials(self):
  '''Clear the any credentials for this instance
  '''
  # Reset every credential-related attribute in one sweep.
  for attr in ('_consumer_key', '_consumer_secret',
               '_access_token_key', '_access_token_secret',
               '_oauth_consumer'):
    setattr(self, attr, None)
def GetPublicTimeline(self,
                      since_id=None,
                      include_rts=None,
                      include_entities=None):
    '''Fetch the public timeline: recent twitter.Status messages from all users.

    Args:
      since_id:
        Only return results with an ID more recent than this status ID.
        Twitter may force this to the oldest available ID. [Optional]
      include_rts:
        When True, native retweets are included in the stream. [Optional]
      include_entities:
        When True, each tweet carries an "entities" node with metadata
        (user_mentions, urls, hashtags). [Optional]

    Returns:
      A sequence of twitter.Status instances, one for each message
    '''
    parameters = {}
    if since_id:
        parameters['since_id'] = since_id
    # Twitter expects boolean flags encoded as the integer 1.
    if include_rts:
        parameters['include_rts'] = 1
    if include_entities:
        parameters['include_entities'] = 1
    url = '%s/statuses/public_timeline.json' % self.base_url
    raw = self._FetchUrl(url, parameters=parameters)
    payload = self._ParseAndCheckTwitter(raw)
    return [Status.NewFromJsonDict(entry) for entry in payload]
def FilterPublicTimeline(self,
                         term,
                         since_id=None):
    '''Filter the public timeline by a search term, locally on this machine.

    Args:
      term:
        term to search by (matched case-insensitively against status text).
      since_id:
        Only return results with an ID more recent than this status ID.
        Twitter may force this to the oldest available ID. [Optional]

    Returns:
      A sequence of twitter.Status instances, one for each message
      containing the term
    '''
    needle = term.lower()
    # Pull the timeline once, then filter client-side.
    return [status for status in self.GetPublicTimeline(since_id)
            if needle in status.text.lower()]
def GetSearch(self,
              term=None,
              geocode=None,
              since_id=None,
              per_page=15,
              page=1,
              lang="en",
              show_user="true",
              query_users=False):
    '''Return twitter search results for a given term.

    Args:
      term:
        term to search by. Optional if you include geocode.
      geocode:
        geolocation information in the form (latitude, longitude, radius)
        [Optional]
      since_id:
        Only return results with an ID more recent than this status ID.
        Twitter may force this to the oldest available ID. [Optional]
      per_page:
        number of results to return. Default is 15 [Optional]
      page:
        Specifies the page of results to retrieve.
        Note: there are pagination limits. [Optional]
      lang:
        language for results. Default is English [Optional]
      show_user:
        prefixes screen name in status
      query_users:
        If False, each attached user has only screen_name and
        profile_image_url. If True, the full user is fetched — at the
        cost of one extra API request per status.

    Returns:
      A sequence of twitter.Status instances, one for each message
      containing the term
    '''
    # Nothing to search for without a term or a geocode.
    if term is None and geocode is None:
        return []
    parameters = {
        'show_user': show_user,
        'lang': lang,
        'rpp': per_page,
        'page': page,
    }
    if since_id:
        parameters['since_id'] = since_id
    if term is not None:
        parameters['q'] = term
    if geocode is not None:
        parameters['geocode'] = ','.join(map(str, geocode))
    url = 'http://search.twitter.com/search.json'
    raw = self._FetchUrl(url, parameters=parameters)
    payload = self._ParseAndCheckTwitter(raw)
    statuses = []
    for entry in payload['results']:
        status = Status.NewFromJsonDict(entry)
        if query_users:
            # Full user lookup — one extra request per status.
            status.user = self.GetUser(urllib.quote(entry['from_user']))
        else:
            status.user = User(screen_name=entry['from_user'],
                               profile_image_url=entry['profile_image_url'])
        statuses.append(status)
    return statuses
def GetTrendsCurrent(self, exclude=None):
    '''Get the current top trending topics.

    Args:
      exclude:
        Appends the exclude parameter as a request parameter.
        Currently only exclude=hashtags is supported. [Optional]

    Returns:
      A list with 10 entries. Each entry contains the twitter.
    '''
    parameters = {}
    if exclude:
        parameters['exclude'] = exclude
    url = '%s/trends/current.json' % self.base_url
    raw = self._FetchUrl(url, parameters=parameters)
    payload = self._ParseAndCheckTwitter(raw)
    # 'trends' maps a timestamp to the trend entries observed at that time.
    return [Trend.NewFromJsonDict(entry, timestamp=stamp)
            for stamp in payload['trends']
            for entry in payload['trends'][stamp]]
def GetTrendsWoeid(self, woeid, exclude=None):
    '''Return the top 10 trending topics for a specific WOEID, if trending
    information is available for it.

    Args:
      woeid:
        the Yahoo! Where On Earth ID for a location.
      exclude:
        Appends the exclude parameter as a request parameter.
        Currently only exclude=hashtags is supported. [Optional]

    Returns:
      A list with 10 entries. Each entry contains a Trend.
    '''
    parameters = {}
    if exclude:
        parameters['exclude'] = exclude
    url = '%s/trends/%s.json' % (self.base_url, woeid)
    raw = self._FetchUrl(url, parameters=parameters)
    payload = self._ParseAndCheckTwitter(raw)
    # The response is a one-element list; 'as_of' stamps every trend.
    as_of = payload[0]['as_of']
    return [Trend.NewFromJsonDict(entry, timestamp=as_of)
            for entry in payload[0]['trends']]
def GetTrendsDaily(self, exclude=None, startdate=None):
    '''Get the top trending topics for each hour of a given day.

    Args:
      exclude:
        Appends the exclude parameter as a request parameter.
        Currently only exclude=hashtags is supported. [Optional]
      startdate:
        The start date for the report, formatted YYYY-MM-DD.
        Defaults to today (UTC). [Optional]

    Returns:
      A list with 24 entries. Each entry contains the twitter.
      Trend elements that were trending at the corresponding hour of the day.
    '''
    parameters = {}
    if exclude:
        parameters['exclude'] = exclude
    # Default to today's date (UTC) when no start date was supplied.
    parameters['date'] = startdate or time.strftime('%Y-%m-%d', time.gmtime())
    url = '%s/trends/daily.json' % self.base_url
    raw = self._FetchUrl(url, parameters=parameters)
    payload = self._ParseAndCheckTwitter(raw)
    hourly = [None] * 24
    for stamp in payload['trends']:
        # Slot each batch of trends by the hour encoded in its timestamp.
        hour = int(time.strftime('%H', time.strptime(stamp, '%Y-%m-%d %H:%M')))
        hourly[hour] = [Trend.NewFromJsonDict(entry, timestamp=stamp)
                        for entry in payload['trends'][stamp]]
    return hourly
def GetTrendsWeekly(self, exclude=None, startdate=None):
    '''Get the top 30 trending topics for each day in a given week.

    Args:
      exclude:
        Appends the exclude parameter as a request parameter.
        Currently only exclude=hashtags is supported. [Optional]
      startdate:
        The start date for the report, formatted YYYY-MM-DD.
        Defaults to today (UTC). [Optional]

    Returns:
      A list with each entry contains the twitter.
      Trend elements of trending topics for the corrsponding day of the week
    '''
    parameters = {}
    if exclude:
        parameters['exclude'] = exclude
    parameters['date'] = startdate or time.strftime('%Y-%m-%d', time.gmtime())
    url = '%s/trends/weekly.json' % self.base_url
    raw = self._FetchUrl(url, parameters=parameters)
    payload = self._ParseAndCheckTwitter(raw)
    weekly = [None] * 7
    # Order the returned date keys chronologically via their epoch seconds,
    # then fill the result slots in that order.
    dated = sorted((calendar.timegm(time.strptime(stamp, '%Y-%m-%d')), stamp)
                   for stamp in payload['trends'])
    for slot, (_epoch, stamp) in enumerate(dated):
        weekly[slot] = [Trend.NewFromJsonDict(entry, timestamp=stamp)
                        for entry in payload['trends'][stamp]]
    return weekly
def GetFriendsTimeline(self,
                       user=None,
                       count=None,
                       page=None,
                       since_id=None,
                       retweets=None,
                       include_entities=None):
    '''Fetch the twitter.Status messages for a user's friends.

    The twitter.Api instance must be authenticated if the user is private.

    Args:
      user:
        ID or screen name of the user whose friends_timeline to return.
        Defaults to the authenticated user. [Optional]
      count:
        Number of statuses to retrieve; at most 100. [Optional]
      page:
        Specifies the page of results to retrieve.
        Note: there are pagination limits. [Optional]
      since_id:
        Only return results with an ID more recent than this status ID.
        Twitter may force this to the oldest available ID. [Optional]
      retweets:
        If True, the timeline will contain native retweets. [Optional]
      include_entities:
        If True, each tweet carries an "entities" node with metadata
        (user_mentions, urls, hashtags). [Optional]

    Returns:
      A sequence of twitter.Status instances, one for each message
    '''
    if not user and not self._oauth_consumer:
        raise TwitterError("User must be specified if API is not authenticated.")
    base = '%s/statuses/friends_timeline' % self.base_url
    url = '%s/%s.json' % (base, user) if user else '%s.json' % base
    parameters = {}
    if count is not None:
        try:
            if int(count) > 100:
                raise TwitterError("'count' may not be greater than 100")
        except ValueError:
            raise TwitterError("'count' must be an integer")
        parameters['count'] = count
    if page is not None:
        try:
            parameters['page'] = int(page)
        except ValueError:
            raise TwitterError("'page' must be an integer")
    if since_id:
        parameters['since_id'] = since_id
    if retweets:
        parameters['include_rts'] = True
    if include_entities:
        parameters['include_entities'] = True
    raw = self._FetchUrl(url, parameters=parameters)
    payload = self._ParseAndCheckTwitter(raw)
    return [Status.NewFromJsonDict(entry) for entry in payload]
def GetUserTimeline(self,
                    id=None,
                    user_id=None,
                    screen_name=None,
                    since_id=None,
                    max_id=None,
                    count=None,
                    page=None,
                    include_rts=None,
                    include_entities=None):
    '''Fetch the sequence of public Status messages for a single user.

    The twitter.Api instance must be authenticated if the user is private.

    Args:
      id:
        Specifies the ID or screen name of the user for whom to return
        the user_timeline. [Optional]
      user_id:
        Specfies the ID of the user for whom to return the
        user_timeline. Helpful for disambiguating when a valid user ID
        is also a valid screen name. [Optional]
      screen_name:
        Specfies the screen name of the user for whom to return the
        user_timeline. Helpful for disambiguating when a valid screen
        name is also a user ID. [Optional]
      since_id:
        Returns results with an ID greater than (that is, more recent
        than) the specified ID. Twitter may force this to the oldest
        available ID. [Optional]
      max_id:
        Returns only statuses with an ID less than (that is, older
        than) or equal to the specified ID. [Optional]
      count:
        Specifies the number of statuses to retrieve. May not be
        greater than 200. [Optional]
      page:
        Specifies the page of results to retrieve.
        Note: there are pagination limits. [Optional]
      include_rts:
        If True, the timeline will contain native retweets (if they
        exist) in addition to the standard stream of tweets. [Optional]
      include_entities:
        If True, each tweet will include a node called "entities,".
        This node offers a variety of metadata about the tweet in a
        discreet structure, including: user_mentions, urls, and
        hashtags. [Optional]

    Returns:
      A sequence of Status instances, one for each message up to count

    Raises:
      TwitterError: if a numeric argument is not convertible, or if no
        user is given and the API is unauthenticated.
    '''
    parameters = {}
    if id:
        url = '%s/statuses/user_timeline/%s.json' % (self.base_url, id)
    elif user_id:
        url = '%s/statuses/user_timeline.json?user_id=%d' % (self.base_url, user_id)
    elif screen_name:
        url = ('%s/statuses/user_timeline.json?screen_name=%s' % (self.base_url,
               screen_name))
    elif not self._oauth_consumer:
        raise TwitterError("User must be specified if API is not authenticated.")
    else:
        url = '%s/statuses/user_timeline.json' % self.base_url
    # Narrowed from bare `except:` clauses, which also swallowed
    # KeyboardInterrupt/SystemExit; long()/int() raise ValueError or
    # TypeError on bad input.
    if since_id:
        try:
            parameters['since_id'] = long(since_id)
        except (ValueError, TypeError):
            raise TwitterError("since_id must be an integer")
    if max_id:
        try:
            parameters['max_id'] = long(max_id)
        except (ValueError, TypeError):
            raise TwitterError("max_id must be an integer")
    if count:
        try:
            parameters['count'] = int(count)
        except (ValueError, TypeError):
            raise TwitterError("count must be an integer")
    if page:
        try:
            parameters['page'] = int(page)
        except (ValueError, TypeError):
            raise TwitterError("page must be an integer")
    if include_rts:
        parameters['include_rts'] = 1
    if include_entities:
        parameters['include_entities'] = 1
    json = self._FetchUrl(url, parameters=parameters)
    data = self._ParseAndCheckTwitter(json)
    return [Status.NewFromJsonDict(x) for x in data]
def GetStatus(self, id, include_entities=None):
    '''Returns a single status message.

    The twitter.Api instance must be authenticated if the
    status message is private.

    Args:
      id:
        The numeric ID of the status you are trying to retrieve.
      include_entities:
        If True, each tweet will include a node called "entities".
        This node offers a variety of metadata about the tweet in a
        discreet structure, including: user_mentions, urls, and
        hashtags. [Optional]

    Returns:
      A twitter.Status instance representing that status message

    Raises:
      TwitterError: if `id` is not convertible to a long integer.
    '''
    # Narrowed from a bare `except:` (which also swallowed
    # KeyboardInterrupt/SystemExit); long() raises ValueError/TypeError
    # on bad input. Error message grammar fixed ("an long" -> "a long").
    try:
        if id:
            long(id)
    except (ValueError, TypeError):
        raise TwitterError("id must be a long integer")
    parameters = {}
    if include_entities:
        parameters['include_entities'] = 1
    url = '%s/statuses/show/%s.json' % (self.base_url, id)
    json = self._FetchUrl(url, parameters=parameters)
    data = self._ParseAndCheckTwitter(json)
    return Status.NewFromJsonDict(data)
def DestroyStatus(self, id):
    '''Destroys the status specified by the required ID parameter.

    The twitter.Api instance must be authenticated and the
    authenticating user must be the author of the specified status.

    Args:
      id:
        The numerical ID of the status you're trying to destroy.

    Returns:
      A twitter.Status instance representing the destroyed status message

    Raises:
      TwitterError: if `id` is not convertible to an integer.
    '''
    # Narrowed from a bare `except:`, which also swallowed
    # KeyboardInterrupt/SystemExit; long() raises ValueError/TypeError.
    try:
        if id:
            long(id)
    except (ValueError, TypeError):
        raise TwitterError("id must be an integer")
    url = '%s/statuses/destroy/%s.json' % (self.base_url, id)
    json = self._FetchUrl(url, post_data={'id': id})
    data = self._ParseAndCheckTwitter(json)
    return Status.NewFromJsonDict(data)
@classmethod
def _calculate_status_length(cls, status, linksize=19):
dummy_link_replacement = 'https://-%d-chars%s/' % (linksize, '-'*(linksize - 18))
shortened = ' '.join([x if not (x.startswith('http://') or
x.startswith('https://'))
else
dummy_link_replacement
for x in status.split(' ')])
return len(shortened)
def PostUpdate(self, status, in_reply_to_status_id=None):
    '''Post a twitter status message from the authenticated user.

    The twitter.Api instance must be authenticated.

    Args:
      status:
        The message text to be posted.
        Must be less than or equal to 140 characters.
      in_reply_to_status_id:
        The ID of an existing status that the status to be posted is
        in reply to. This implicitly sets the in_reply_to_user_id
        attribute of the resulting status to the user ID of the
        message being replied to. Invalid/missing status IDs will be
        ignored. [Optional]

    Returns:
      A twitter.Status instance representing the message posted.
    '''
    if not self._oauth_consumer:
        raise TwitterError("The twitter.Api instance must be authenticated.")
    url = '%s/statuses/update.json' % self.base_url
    # Normalize to unicode so the length check counts characters, not bytes.
    if isinstance(status, unicode) or self._input_encoding is None:
        u_status = status
    else:
        u_status = unicode(status, self._input_encoding)
    if self._calculate_status_length(u_status, self._shortlink_size) > CHARACTER_LIMIT:
        raise TwitterError("Text must be less than or equal to %d characters. "
                           "Consider using PostUpdates." % CHARACTER_LIMIT)
    post_data = {'status': status}
    if in_reply_to_status_id:
        post_data['in_reply_to_status_id'] = in_reply_to_status_id
    raw = self._FetchUrl(url, post_data=post_data)
    payload = self._ParseAndCheckTwitter(raw)
    return Status.NewFromJsonDict(payload)
def PostUpdates(self, status, continuation=None, **kwargs):
    '''Post one or more twitter status messages from the authenticated user.

    Unlike api.PostUpdate, this method will post multiple status updates
    if the message is longer than 140 characters.

    The twitter.Api instance must be authenticated.

    Args:
      status:
        The message text to be posted.
        May be longer than 140 characters.
      continuation:
        The character string, if any, to be appended to all but the
        last message. Note that Twitter strips trailing '...' strings
        from messages. Consider using the unicode \u2026 character
        (horizontal ellipsis) instead. [Defaults to None]
      **kwargs:
        See api.PostUpdate for a list of accepted parameters.

    Returns:
      A of list twitter.Status instance representing the messages posted.
    '''
    if continuation is None:
        continuation = ''
    # Leave room for the continuation marker on every chunk but the last.
    budget = CHARACTER_LIMIT - len(continuation)
    chunks = textwrap.wrap(status, budget)
    posted = [self.PostUpdate(chunk + continuation, **kwargs)
              for chunk in chunks[:-1]]
    posted.append(self.PostUpdate(chunks[-1], **kwargs))
    return posted
def GetUserRetweets(self, count=None, since_id=None, max_id=None, include_entities=False):
    '''Fetch the sequence of retweets made by a single user.

    The twitter.Api instance must be authenticated.

    Args:
      count:
        The number of status messages to retrieve; at most 100. [Optional]
      since_id:
        Returns results with an ID greater than (that is, more recent
        than) the specified ID. Twitter may force this to the oldest
        available ID. [Optional]
      max_id:
        Returns results with an ID less than (that is, older than) or
        equal to the specified ID. [Optional]
      include_entities:
        If True, each tweet will include a node called "entities,".
        This node offers a variety of metadata about the tweet in a
        discreet structure, including: user_mentions, urls, and
        hashtags. [Optional]

    Returns:
      A sequence of twitter.Status instances, one for each message up to count

    Raises:
      TwitterError: if unauthenticated or a numeric argument is invalid.
    '''
    url = '%s/statuses/retweeted_by_me.json' % self.base_url
    if not self._oauth_consumer:
        raise TwitterError("The twitter.Api instance must be authenticated.")
    parameters = {}
    if count is not None:
        try:
            if int(count) > 100:
                raise TwitterError("'count' may not be greater than 100")
        except ValueError:
            raise TwitterError("'count' must be an integer")
    if count:
        parameters['count'] = count
    if since_id:
        parameters['since_id'] = since_id
    if include_entities:
        parameters['include_entities'] = True
    if max_id:
        # Narrowed from a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit; long() raises ValueError/TypeError.
        try:
            parameters['max_id'] = long(max_id)
        except (ValueError, TypeError):
            raise TwitterError("max_id must be an integer")
    json = self._FetchUrl(url, parameters=parameters)
    data = self._ParseAndCheckTwitter(json)
    return [Status.NewFromJsonDict(x) for x in data]
def GetReplies(self, since=None, since_id=None, page=None):
    '''Get a sequence of status messages representing the 20 most
    recent replies (status updates prefixed with @twitterID) to the
    authenticating user.

    Args:
      since:
        Narrows the returned results to just those statuses created
        after the specified HTTP-formatted date. [Optional]
      since_id:
        Only return results with an ID more recent than this status ID.
        Twitter may force this to the oldest available ID. [Optional]
      page:
        Specifies the page of results to retrieve.
        Note: there are pagination limits. [Optional]

    Returns:
      A sequence of twitter.Status instances, one for each reply to the user.
    '''
    url = '%s/statuses/replies.json' % self.base_url
    if not self._oauth_consumer:
        raise TwitterError("The twitter.Api instance must be authenticated.")
    parameters = {}
    for key, value in (('since', since),
                       ('since_id', since_id),
                       ('page', page)):
        if value:
            parameters[key] = value
    raw = self._FetchUrl(url, parameters=parameters)
    payload = self._ParseAndCheckTwitter(raw)
    return [Status.NewFromJsonDict(entry) for entry in payload]
def GetRetweets(self, statusid):
    '''Returns up to 100 of the first retweets of the tweet identified
    by statusid.

    The twitter.Api instance must be authenticated.

    Args:
      statusid:
        The ID of the tweet for which retweets should be searched for

    Returns:
      A list of twitter.Status instances, which are retweets of statusid
    '''
    if not self._oauth_consumer:
        # Typo fixed in the user-facing message: "instsance" -> "instance".
        raise TwitterError("The twitter.Api instance must be authenticated.")
    url = '%s/statuses/retweets/%s.json?include_entities=true&include_rts=true' % (self.base_url, statusid)
    parameters = {}
    json = self._FetchUrl(url, parameters=parameters)
    data = self._ParseAndCheckTwitter(json)
    return [Status.NewFromJsonDict(s) for s in data]
def GetFriends(self, user=None, cursor=-1):
    '''Fetch the sequence of twitter.User instances, one for each friend.

    The twitter.Api instance must be authenticated.

    Args:
      user:
        The twitter name or id of the user whose friends you are fetching.
        If not specified, defaults to the authenticated user. [Optional]
      cursor:
        The Twitter API cursor position to start at. [Optional]

    Returns:
      A sequence of twitter.User instances, one for each friend
    '''
    if not user and not self._oauth_consumer:
        raise TwitterError("twitter.Api instance must be authenticated")
    if user:
        url = '%s/statuses/friends/%s.json' % (self.base_url, user)
    else:
        url = '%s/statuses/friends.json' % self.base_url
    raw = self._FetchUrl(url, parameters={'cursor': cursor})
    payload = self._ParseAndCheckTwitter(raw)
    return [User.NewFromJsonDict(entry) for entry in payload['users']]
def GetFriendIDs(self, user=None, cursor=-1):
    '''Returns a list of twitter user id's for every person
    the specified user is following.

    Args:
      user:
        The id or screen_name of the user to retrieve the id list for
        [Optional]
      cursor:
        The Twitter API cursor position to start at. [Optional]

    Returns:
      A list of integers, one for each user id.
    '''
    if not user and not self._oauth_consumer:
        raise TwitterError("twitter.Api instance must be authenticated")
    if user:
        url = '%s/friends/ids/%s.json' % (self.base_url, user)
    else:
        url = '%s/friends/ids.json' % self.base_url
    raw = self._FetchUrl(url, parameters={'cursor': cursor})
    return self._ParseAndCheckTwitter(raw)
def GetFollowerIDs(self, userid=None, cursor=-1):
    '''Fetch the sequence of twitter.User instances, one for each follower.

    The twitter.Api instance must be authenticated.

    Args:
      userid:
        The id of the user to retrieve follower ids for. [Optional]
      cursor:
        The Twitter API cursor position to start at. [Optional]

    Returns:
      A sequence of twitter.User instances, one for each follower
    '''
    url = '%s/followers/ids.json' % self.base_url
    parameters = {'cursor': cursor}
    if userid:
        parameters['user_id'] = userid
    raw = self._FetchUrl(url, parameters=parameters)
    return self._ParseAndCheckTwitter(raw)
def GetFollowers(self, cursor=-1):
    '''Fetch the sequence of twitter.User instances, one for each follower.

    The twitter.Api instance must be authenticated.

    Args:
      cursor:
        Specifies the Twitter API Cursor location to start at. [Optional]
        Note: there are pagination limits.

    Returns:
      A sequence of twitter.User instances, one for each follower

    Raises:
      TwitterError: if the instance is not authenticated.
    '''
    if not self._oauth_consumer:
        raise TwitterError("twitter.Api instance must be authenticated")
    url = '%s/statuses/followers.json' % self.base_url
    result = []
    while True:
        parameters = { 'cursor': cursor }
        json = self._FetchUrl(url, parameters=parameters)
        data = self._ParseAndCheckTwitter(json)
        result += [User.NewFromJsonDict(x) for x in data['users']]
        if 'next_cursor' in data:
            if data['next_cursor'] == 0 or data['next_cursor'] == data['previous_cursor']:
                break
            # BUGFIX: advance the cursor to the next page. Previously the
            # same cursor value was re-sent forever, so any account with
            # more than one page of followers caused an infinite loop
            # re-fetching the first page.
            cursor = data['next_cursor']
        else:
            break
    return result
def GetFeatured(self):
    '''Fetch the sequence of twitter.User instances featured on twitter.com.

    The twitter.Api instance must be authenticated.

    Returns:
      A sequence of twitter.User instances
    '''
    raw = self._FetchUrl('%s/statuses/featured.json' % self.base_url)
    payload = self._ParseAndCheckTwitter(raw)
    return [User.NewFromJsonDict(entry) for entry in payload]
def UsersLookup(self, user_id=None, screen_name=None, users=None):
    '''Fetch extended information for the specified users.

    Users may be specified either as lists of either user_ids,
    screen_names, or twitter.User objects. The list of users that
    are queried is the union of all specified parameters.

    The twitter.Api instance must be authenticated.

    Args:
      user_id:
        A list of user_ids to retrieve extended information.
        [Optional]
      screen_name:
        A list of screen_names to retrieve extended information.
        [Optional]
      users:
        A list of twitter.User objects to retrieve extended information.
        [Optional]

    Returns:
      A list of twitter.User objects for the requested users

    Raises:
      TwitterError: if unauthenticated or no user selector was given.
    '''
    if not self._oauth_consumer:
        raise TwitterError("The twitter.Api instance must be authenticated.")
    if not user_id and not screen_name and not users:
        # Typo fixed in the user-facing message: "on of" -> "one of".
        raise TwitterError("Specify at least one of user_id, screen_name, or users.")
    url = '%s/users/lookup.json' % self.base_url
    parameters = {}
    uids = list()
    if user_id:
        uids.extend(user_id)
    if users:
        uids.extend([u.id for u in users])
    if len(uids):
        parameters['user_id'] = ','.join(["%s" % u for u in uids])
    if screen_name:
        parameters['screen_name'] = ','.join(screen_name)
    json = self._FetchUrl(url, parameters=parameters)
    data = self._ParseAndCheckTwitter(json)
    return [User.NewFromJsonDict(u) for u in data]
def GetUser(self, user):
    '''Returns a single user.

    The twitter.Api instance must be authenticated.

    Args:
      user: The twitter name or id of the user to retrieve.

    Returns:
      A twitter.User instance representing that user
    '''
    raw = self._FetchUrl('%s/users/show/%s.json' % (self.base_url, user))
    payload = self._ParseAndCheckTwitter(raw)
    return User.NewFromJsonDict(payload)
def GetDirectMessages(self, since=None, since_id=None, page=None):
    '''Returns a list of the direct messages sent to the authenticating user.

    The twitter.Api instance must be authenticated.

    Args:
      since:
        Narrows the returned results to just those statuses created
        after the specified HTTP-formatted date. [Optional]
      since_id:
        Only return results with an ID more recent than this ID.
        Twitter may force this to the oldest available ID. [Optional]
      page:
        Specifies the page of results to retrieve.
        Note: there are pagination limits. [Optional]

    Returns:
      A sequence of twitter.DirectMessage instances
    '''
    url = '%s/direct_messages.json' % self.base_url
    if not self._oauth_consumer:
        raise TwitterError("The twitter.Api instance must be authenticated.")
    parameters = {}
    for key, value in (('since', since),
                       ('since_id', since_id),
                       ('page', page)):
        if value:
            parameters[key] = value
    raw = self._FetchUrl(url, parameters=parameters)
    payload = self._ParseAndCheckTwitter(raw)
    return [DirectMessage.NewFromJsonDict(entry) for entry in payload]
def PostDirectMessage(self, user, text):
    '''Post a twitter direct message from the authenticated user.

    The twitter.Api instance must be authenticated.

    Args:
      user: The ID or screen name of the recipient user.
      text: The message text to be posted. Must be less than 140 characters.

    Returns:
      A twitter.DirectMessage instance representing the message posted
    '''
    if not self._oauth_consumer:
        raise TwitterError("The twitter.Api instance must be authenticated.")
    url = '%s/direct_messages/new.json' % self.base_url
    raw = self._FetchUrl(url, post_data={'text': text, 'user': user})
    payload = self._ParseAndCheckTwitter(raw)
    return DirectMessage.NewFromJsonDict(payload)
def DestroyDirectMessage(self, id):
    '''Destroys the direct message specified in the required ID parameter.

    The twitter.Api instance must be authenticated, and the
    authenticating user must be the recipient of the specified direct
    message.

    Args:
      id: The id of the direct message to be destroyed

    Returns:
      A twitter.DirectMessage instance representing the message destroyed
    '''
    raw = self._FetchUrl('%s/direct_messages/destroy/%s.json' % (self.base_url, id),
                         post_data={'id': id})
    payload = self._ParseAndCheckTwitter(raw)
    return DirectMessage.NewFromJsonDict(payload)
def CreateFriendship(self, user):
    '''Befriends the user specified in the user parameter as the authenticating user.

    The twitter.Api instance must be authenticated.

    Args:
      user: The ID or screen name of the user to befriend.

    Returns:
      A twitter.User instance representing the befriended user.
    '''
    raw = self._FetchUrl('%s/friendships/create/%s.json' % (self.base_url, user),
                         post_data={'user': user})
    payload = self._ParseAndCheckTwitter(raw)
    return User.NewFromJsonDict(payload)
def DestroyFriendship(self, user):
    '''Discontinues friendship with the user specified in the user parameter.

    The twitter.Api instance must be authenticated.

    Args:
      user: The ID or screen name of the user with whom to discontinue friendship.

    Returns:
      A twitter.User instance representing the discontinued friend.
    '''
    raw = self._FetchUrl('%s/friendships/destroy/%s.json' % (self.base_url, user),
                         post_data={'user': user})
    payload = self._ParseAndCheckTwitter(raw)
    return User.NewFromJsonDict(payload)
def CreateFavorite(self, status):
    '''Favorites the status specified in the status parameter as the authenticating user.

    Returns the favorite status when successful.
    The twitter.Api instance must be authenticated.

    Args:
      status: The twitter.Status instance to mark as a favorite.

    Returns:
      A twitter.Status instance representing the newly-marked favorite.
    '''
    raw = self._FetchUrl('%s/favorites/create/%s.json' % (self.base_url, status.id),
                         post_data={'id': status.id})
    payload = self._ParseAndCheckTwitter(raw)
    return Status.NewFromJsonDict(payload)
def DestroyFavorite(self, status):
    '''Un-favorites the status specified in the ID parameter as the authenticating user.

    Returns the un-favorited status in the requested format when successful.
    The twitter.Api instance must be authenticated.

    Args:
      status: The twitter.Status to unmark as a favorite.

    Returns:
      A twitter.Status instance representing the newly-unmarked favorite.
    '''
    raw = self._FetchUrl('%s/favorites/destroy/%s.json' % (self.base_url, status.id),
                         post_data={'id': status.id})
    payload = self._ParseAndCheckTwitter(raw)
    return Status.NewFromJsonDict(payload)
def GetFavorites(self,
                 user=None,
                 page=None):
    '''Return a list of Status objects representing favorited tweets.

    By default, returns the (up to) 20 most recent tweets for the
    authenticated user.

    Args:
      user:
        The twitter name or id of the user whose favorites you are fetching.
        If not specified, defaults to the authenticated user. [Optional]
      page:
        Specifies the page of results to retrieve.
        Note: there are pagination limits. [Optional]

    Returns:
      A sequence of twitter.Status instances.
    '''
    if user:
        url = '%s/favorites/%s.json' % (self.base_url, user)
    elif not user and not self._oauth_consumer:
        raise TwitterError("User must be specified if API is not authenticated.")
    else:
        url = '%s/favorites.json' % self.base_url
    parameters = {}
    if page:
        parameters['page'] = page
    raw = self._FetchUrl(url, parameters=parameters)
    payload = self._ParseAndCheckTwitter(raw)
    return [Status.NewFromJsonDict(entry) for entry in payload]
def GetMentions(self,
                since_id=None,
                max_id=None,
                page=None):
    '''Returns the 20 most recent mentions (status containing @twitterID)
    for the authenticating user.

    Args:
      since_id:
        Returns results with an ID greater than (that is, more recent
        than) the specified ID. Twitter may force this to the oldest
        available ID. [Optional]
      max_id:
        Returns only statuses with an ID less than
        (that is, older than) the specified ID. [Optional]
      page:
        Specifies the page of results to retrieve.
        Note: there are pagination limits. [Optional]

    Returns:
      A sequence of twitter.Status instances, one for each mention of the user.

    Raises:
      TwitterError: if unauthenticated or max_id is not an integer.
    '''
    url = '%s/statuses/mentions.json' % self.base_url
    if not self._oauth_consumer:
        raise TwitterError("The twitter.Api instance must be authenticated.")
    parameters = {}
    if since_id:
        parameters['since_id'] = since_id
    if max_id:
        # Narrowed from a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit; long() raises ValueError/TypeError.
        try:
            parameters['max_id'] = long(max_id)
        except (ValueError, TypeError):
            raise TwitterError("max_id must be an integer")
    if page:
        parameters['page'] = page
    json = self._FetchUrl(url, parameters=parameters)
    data = self._ParseAndCheckTwitter(json)
    return [Status.NewFromJsonDict(x) for x in data]
def CreateList(self, user, name, mode=None, description=None):
    '''Creates a new list with the give name

    The twitter.Api instance must be authenticated.

    Args:
      user:
        Twitter name to create the list for
      name:
        New name for the list
      mode:
        'public' or 'private'. Defaults to 'public'. [Optional]
      description:
        Description of the list. [Optional]

    Returns:
      A twitter.List instance representing the new list
    '''
    endpoint = '%s/%s/lists.json' % (self.base_url, user)
    # 'name' is mandatory; the two optional fields are only sent when given.
    post_params = {'name': name}
    for field, value in (('mode', mode), ('description', description)):
        if value is not None:
            post_params[field] = value
    response = self._FetchUrl(endpoint, post_data=post_params)
    payload = self._ParseAndCheckTwitter(response)
    return List.NewFromJsonDict(payload)
def DestroyList(self, user, id):
    '''Destroys the list from the given user

    The twitter.Api instance must be authenticated.

    Args:
      user:
        The user to remove the list from.
      id:
        The slug or id of the list to remove.

    Returns:
      A twitter.List instance representing the removed list.
    '''
    endpoint = '%s/%s/lists/%s.json' % (self.base_url, user, id)
    # The v1 REST API tunnels DELETE through a '_method' POST parameter.
    response = self._FetchUrl(endpoint, post_data={'_method': 'DELETE'})
    payload = self._ParseAndCheckTwitter(response)
    return List.NewFromJsonDict(payload)
def CreateSubscription(self, owner, list):
    '''Creates a subscription to a list by the authenticated user

    The twitter.Api instance must be authenticated.

    Args:
      owner:
        User name or id of the owner of the list being subscribed to.
      list:
        The slug or list id to subscribe the user to

    Returns:
      A twitter.List instance representing the list subscribed to
    '''
    endpoint = '%s/%s/%s/subscribers.json' % (self.base_url, owner, list)
    response = self._FetchUrl(endpoint, post_data={'list_id': list})
    payload = self._ParseAndCheckTwitter(response)
    return List.NewFromJsonDict(payload)
def DestroySubscription(self, owner, list):
    '''Destroys the subscription to a list for the authenticated user

    The twitter.Api instance must be authenticated.

    Args:
      owner:
        The user id or screen name of the user that owns the
        list that is to be unsubscribed from
      list:
        The slug or list id of the list to unsubscribe from

    Returns:
      A twitter.List instance representing the removed list.
    '''
    endpoint = '%s/%s/%s/subscribers.json' % (self.base_url, owner, list)
    # DELETE is tunnelled through a '_method' POST parameter.
    response = self._FetchUrl(endpoint,
                              post_data={'_method': 'DELETE', 'list_id': list})
    payload = self._ParseAndCheckTwitter(response)
    return List.NewFromJsonDict(payload)
def GetSubscriptions(self, user, cursor=-1):
    '''Fetch the sequence of Lists that the given user is subscribed to

    The twitter.Api instance must be authenticated.

    Args:
      user:
        The twitter name or id of the user
      cursor:
        "page" value that Twitter will use to start building the
        list sequence from.  -1 to start at the beginning.
        Twitter will return in the result the values for next_cursor
        and previous_cursor. [Optional]

    Returns:
      A sequence of twitter.List instances, one for each list
    '''
    if not self._oauth_consumer:
        raise TwitterError("twitter.Api instance must be authenticated")

    endpoint = '%s/%s/lists/subscriptions.json' % (self.base_url, user)
    response = self._FetchUrl(endpoint, parameters={'cursor': cursor})
    payload = self._ParseAndCheckTwitter(response)
    return [List.NewFromJsonDict(entry) for entry in payload['lists']]
def GetLists(self, user, cursor=-1):
    '''Fetch the sequence of lists for a user.

    The twitter.Api instance must be authenticated.

    Args:
      user:
        The twitter name or id of the user whose friends you are fetching.
        If the passed in user is the same as the authenticated user
        then you will also receive private list data.
      cursor:
        "page" value that Twitter will use to start building the
        list sequence from.  -1 to start at the beginning.
        Twitter will return in the result the values for next_cursor
        and previous_cursor. [Optional]

    Returns:
      A sequence of twitter.List instances, one for each list
    '''
    if not self._oauth_consumer:
        raise TwitterError("twitter.Api instance must be authenticated")

    endpoint = '%s/%s/lists.json' % (self.base_url, user)
    response = self._FetchUrl(endpoint, parameters={'cursor': cursor})
    payload = self._ParseAndCheckTwitter(response)
    return [List.NewFromJsonDict(entry) for entry in payload['lists']]
def GetUserByEmail(self, email):
    '''Returns a single user by email address.

    Args:
      email:
        The email of the user to retrieve.

    Returns:
      A twitter.User instance representing that user
    '''
    # Percent-encode the address: characters such as '+' are legal in
    # e-mail addresses but have reserved meanings inside a query string,
    # so interpolating the raw value corrupts the request.
    url = '%s/users/show.json?email=%s' % (self.base_url, urllib.quote(email))
    json = self._FetchUrl(url)
    data = self._ParseAndCheckTwitter(json)
    return User.NewFromJsonDict(data)
def VerifyCredentials(self):
    '''Returns a twitter.User instance if the authenticating user is valid.

    Returns:
      A twitter.User instance representing that user if the
      credentials are valid, None otherwise.

    Raises:
      TwitterError: if no OAuth credentials were configured.
    '''
    if not self._oauth_consumer:
        raise TwitterError("Api instance must first be given user credentials.")
    url = '%s/account/verify_credentials.json' % self.base_url
    try:
        # no_cache: a credential check must always hit the live server,
        # never a cached response.
        json = self._FetchUrl(url, no_cache=True)
    except urllib2.HTTPError, http_error:
        # A 401 simply means the credentials are wrong -- report that as
        # None; any other HTTP failure is re-raised to the caller.
        if http_error.code == httplib.UNAUTHORIZED:
            return None
        else:
            raise http_error
    data = self._ParseAndCheckTwitter(json)
    return User.NewFromJsonDict(data)
def SetCache(self, cache):
    '''Override the default cache.  Set to None to prevent caching.

    Args:
      cache:
        An instance that supports the same API as the twitter._FileCache
    '''
    # Identity test, not equality: DEFAULT_CACHE is a sentinel object,
    # and a user-supplied cache defining __eq__ could otherwise compare
    # equal to it and be silently replaced by a _FileCache.
    if cache is DEFAULT_CACHE:
        self._cache = _FileCache()
    else:
        self._cache = cache
def SetUrllib(self, urllib):
    '''Override the default urllib implementation.

    Args:
      urllib:
        An instance that supports the same API as the urllib2 module
    '''
    # All HTTP traffic from this Api instance will flow through the
    # supplied object (handy for injecting mocks in tests).
    self._urllib = urllib
def SetCacheTimeout(self, cache_timeout):
    '''Override the default cache timeout.

    Args:
      cache_timeout:
        Time, in seconds, that responses should be reused.
    '''
    # A falsy value effectively disables cache reuse in _FetchUrl.
    self._cache_timeout = cache_timeout
def SetUserAgent(self, user_agent):
    '''Override the default user agent

    Args:
      user_agent:
        A string that should be send to the server as the User-agent
    '''
    # Overwrites any User-Agent previously configured for this instance.
    self._request_headers['User-Agent'] = user_agent
def SetXTwitterHeaders(self, client, url, version):
    '''Set the X-Twitter HTTP headers that will be sent to the server.

    Args:
      client:
         The client name as a string.  Will be sent to the server as
         the 'X-Twitter-Client' header.
      url:
         The URL of the meta.xml as a string.  Will be sent to the server
         as the 'X-Twitter-Client-URL' header.
      version:
         The client version as a string.  Will be sent to the server
         as the 'X-Twitter-Client-Version' header.
    '''
    # Install all three client-identification headers in one pass.
    for header, value in (('X-Twitter-Client', client),
                          ('X-Twitter-Client-URL', url),
                          ('X-Twitter-Client-Version', version)):
        self._request_headers[header] = value
def SetSource(self, source):
    '''Suggest the "from source" value to be displayed on the Twitter web site.

    The value of the 'source' parameter must be first recognized by
    the Twitter server.  New source values are authorized on a case by
    case basis by the Twitter development team.

    Args:
      source:
        The source name as a string.  Will be sent to the server as
        the 'source' parameter.
    '''
    # Stored with the default parameters so it rides along on every request.
    self._default_params['source'] = source
def GetRateLimitStatus(self):
    '''Fetch the rate limit status for the currently authorized user.

    Returns:
      A dictionary containing the time the limit will reset (reset_time),
      the number of remaining hits allowed before the reset (remaining_hits),
      the number of hits allowed in a 60-minute period (hourly_limit), and
      the time of the reset in seconds since The Epoch (reset_time_in_seconds).
    '''
    endpoint = '%s/account/rate_limit_status.json' % self.base_url
    # Bypass the cache: a stale rate-limit answer is worse than useless.
    response = self._FetchUrl(endpoint, no_cache=True)
    return self._ParseAndCheckTwitter(response)
def MaximumHitFrequency(self):
    '''Determines the minimum number of seconds that a program must wait
    before hitting the server again without exceeding the rate_limit
    imposed for the currently authenticated user.

    Returns:
      The minimum second interval that a program must use so as to not
      exceed the rate_limit imposed for the user.
    '''
    rate_status = self.GetRateLimitStatus()
    reset_time = rate_status.get('reset_time', None)
    limit = rate_status.get('remaining_hits', None)

    if reset_time:
        # put the reset time into a datetime object
        reset = datetime.datetime(*rfc822.parsedate(reset_time)[:7])

        # find the difference in time between now and the reset time + 1 hour
        delta = reset + datetime.timedelta(hours=1) - datetime.datetime.utcnow()

        if not limit:
            # No remaining hits (or the key was absent): wait out the rest
            # of the window.  NOTE(review): delta.seconds discards
            # delta.days, so this assumes the reset is always less than a
            # day away -- confirm against the server's reset_time values.
            return int(delta.seconds)

        # determine the minimum number of seconds allowed as a regular interval
        max_frequency = int(delta.seconds / limit) + 1

        # return the number of seconds
        return max_frequency

    # The server gave no reset time; fall back to one request per minute.
    return 60
def _BuildUrl(self, url, path_elements=None, extra_params=None):
    '''Rebuild *url* with extra path segments and query parameters folded in.

    Args:
      path_elements:
        Optional sequence of path segments; falsy entries are dropped.
      extra_params:
        Optional dict of query parameters, appended to any query string
        already present on the URL.

    Returns:
      The rebuilt URL string.
    '''
    # Split the URL into its six constituent parts.
    scheme, netloc, path, params, query, fragment = urlparse.urlparse(url)

    if path_elements:
        # Drop elements with a value of None (or any other falsy value).
        segments = [element for element in path_elements if element]
        if not path.endswith('/'):
            path += '/'
        path += '/'.join(segments)

    if extra_params and len(extra_params) > 0:
        encoded_query = self._EncodeParameters(extra_params)
        # Append to an existing query string, or become the query string.
        if query:
            query = query + '&' + encoded_query
        else:
            query = encoded_query

    return urlparse.urlunparse((scheme, netloc, path, params, query, fragment))
def _InitializeRequestHeaders(self, request_headers):
    # Adopt the caller-provided header dict when one is given; otherwise
    # start from a fresh, empty mapping.
    self._request_headers = request_headers or {}
def _InitializeUserAgent(self):
    # Advertise both the urllib implementation's version and this
    # library's own version in the default User-Agent header.
    self.SetUserAgent('Python-urllib/%s (python-twitter/%s)' %
                      (self._urllib.__version__, __version__))
def _InitializeDefaultParameters(self):
    # Query parameters (e.g. 'source') merged into every outgoing request.
    self._default_params = {}
def _DecompressGzippedResponse(self, response):
    '''Return the response body, gunzipping it when the server compressed it.'''
    body = response.read()
    # Only decompress when the server actually honoured Accept-Encoding.
    if response.headers.get('content-encoding', None) != 'gzip':
        return body
    return gzip.GzipFile(fileobj=StringIO.StringIO(body)).read()
def _Encode(self, s):
    # Normalise any input to a UTF-8 byte string, decoding it from the
    # configured input encoding first when one was supplied.
    if self._input_encoding:
        decoded = unicode(s, self._input_encoding)
    else:
        decoded = unicode(s)
    return decoded.encode('utf-8')
def _EncodeParameters(self, parameters):
    '''Return a string in key=value&key=value form

    Values of None are not included in the output string.

    Args:
      parameters:
        A dict of key/value pairs, where each value is encoded as
        specified by self._encoding

    Returns:
      A URL-encoded string in "key=value&key=value" form
    '''
    if parameters is None:
        return None
    # Drop None values, UTF-8 encode the rest, then urlencode the result.
    filtered = {}
    for key, value in parameters.items():
        if value is not None:
            filtered[key] = self._Encode(value)
    return urllib.urlencode(filtered)
def _EncodePostData(self, post_data):
    '''Return a string in key=value&key=value form

    Values are assumed to be encoded in the format specified by
    self._encoding, and are subsequently URL encoded.

    Args:
      post_data:
        A dict of key/value pairs, where each value is encoded as
        specified by self._encoding

    Returns:
      A URL-encoded string in "key=value&key=value" form
    '''
    if post_data is None:
        return None
    # Unlike _EncodeParameters, None values are NOT filtered out here.
    encoded = {}
    for key, value in post_data.items():
        encoded[key] = self._Encode(value)
    return urllib.urlencode(encoded)
def _ParseAndCheckTwitter(self, json):
    """Parse the JSON text returned by Twitter and vet it for API errors.

    This is a purely defensive check: during some Twitter network
    outages the service returns an HTML "failwhale" page instead of
    JSON, which would otherwise surface as a raw ValueError.
    """
    try:
        data = simplejson.loads(json)
        self._CheckForTwitterError(data)
    except ValueError:
        # Map the two known outage pages to distinct errors; anything
        # else that fails to decode becomes a generic JSON failure.
        if "<title>Twitter / Over capacity</title>" in json:
            raise TwitterError("Capacity Error")
        if "<title>Twitter / Error</title>" in json:
            raise TwitterError("Technical Error")
        raise TwitterError("json decoding")
    return data
def _CheckForTwitterError(self, data):
    """Raises a TwitterError if twitter returns an error message.

    Args:
      data:
        A python dict created from the Twitter json response

    Raises:
      TwitterError wrapping the twitter error message if one exists.
    """
    # Twitter errors are relatively unlikely, so it is faster to test for
    # the keys up front than to try/except an access.  'error' is checked
    # before 'errors', matching the server's two reporting styles.
    for error_key in ('error', 'errors'):
        if error_key in data:
            raise TwitterError(data[error_key])
def _FetchUrl(self,
              url,
              post_data=None,
              parameters=None,
              no_cache=None,
              use_gzip_compression=None):
    '''Fetch a URL, optionally caching for a specified time.

    Args:
      url:
        The URL to retrieve
      post_data:
        A dict of (str, unicode) key/value pairs.
        If set, POST will be used.
      parameters:
        A dict whose key/value pairs should encoded and added
        to the query string. [Optional]
      no_cache:
        If true, overrides the cache on the current request
      use_gzip_compression:
        If True, tells the server to gzip-compress the response.
        It does not apply to POST requests.
        Defaults to None, which will get the value to use from
        the instance variable self._use_gzip [Optional]

    Returns:
      A string containing the body of the response.
    '''
    # Build the extra parameters dict: instance-wide defaults first, then
    # per-call parameters on top.
    extra_params = {}
    if self._default_params:
        extra_params.update(self._default_params)
    if parameters:
        extra_params.update(parameters)

    # The presence of post data decides the HTTP verb.
    if post_data:
        http_method = "POST"
    else:
        http_method = "GET"

    if self._debugHTTP:
        _debug = 1
    else:
        _debug = 0

    http_handler = self._urllib.HTTPHandler(debuglevel=_debug)
    https_handler = self._urllib.HTTPSHandler(debuglevel=_debug)

    opener = self._urllib.OpenerDirector()
    opener.add_handler(http_handler)
    opener.add_handler(https_handler)

    # Per-call override wins; otherwise fall back to the instance setting.
    if use_gzip_compression is None:
        use_gzip = self._use_gzip
    else:
        use_gzip = use_gzip_compression

    # Set up compression (GET requests only).
    if use_gzip and not post_data:
        opener.addheaders.append(('Accept-Encoding', 'gzip'))

    if self._oauth_consumer is not None:
        # For a POST the body parameters, not extra_params, are what get
        # signed; re-point 'parameters' at a copy of them.
        if post_data and http_method == "POST":
            parameters = post_data.copy()

        req = oauth.Request.from_consumer_and_token(self._oauth_consumer,
                                                    token=self._oauth_token,
                                                    http_method=http_method,
                                                    http_url=url, parameters=parameters)

        req.sign_request(self._signature_method_hmac_sha1, self._oauth_consumer, self._oauth_token)

        # NOTE(review): 'headers' is computed but never used afterwards.
        headers = req.to_header()

        if http_method == "POST":
            # Signed parameters travel in the POST body ...
            encoded_post_data = req.to_postdata()
        else:
            # ... or, for GET, in the (re-signed) URL itself.
            encoded_post_data = None
            url = req.to_url()
    else:
        # Unauthenticated: just fold the parameters into the URL/body.
        url = self._BuildUrl(url, extra_params=extra_params)
        encoded_post_data = self._EncodePostData(post_data)

    # Open and return the URL immediately if we're not going to cache
    # (POSTs, explicit no_cache, no cache object, or a zero timeout).
    if encoded_post_data or no_cache or not self._cache or not self._cache_timeout:
        response = opener.open(url, encoded_post_data)
        url_data = self._DecompressGzippedResponse(response)
        opener.close()
    else:
        # Unique keys are a combination of the url and the oAuth Consumer Key
        if self._consumer_key:
            key = self._consumer_key + ':' + url
        else:
            key = url

        # See if it has been cached before
        last_cached = self._cache.GetCachedTime(key)

        # If the cached version is outdated then fetch another and store it
        if not last_cached or time.time() >= last_cached + self._cache_timeout:
            try:
                response = opener.open(url, encoded_post_data)
                url_data = self._DecompressGzippedResponse(response)
                self._cache.Set(key, url_data)
            except urllib2.HTTPError, e:
                # NOTE(review): the error is only printed, and url_data is
                # left unbound on this path, so the return below would
                # raise NameError -- confirm whether this is intentional
                # best-effort behavior or a latent bug.
                print e
            opener.close()
        else:
            url_data = self._cache.Get(key)

    # Always return the latest version
    return url_data
# Raised by _FileCache when a computed cache path falls outside the cache
# root, or when an expected cache directory turns out not to be a directory.
class _FileCacheError(Exception):
    '''Base exception class for FileCache related errors'''
class _FileCache(object):
    '''Filesystem-backed cache mapping string keys to string payloads.

    Entries live under a root directory (by default a per-user folder in
    the system temp directory), sharded DEPTH levels deep using the hex
    MD5 digest of the key.
    '''

    # Number of one-character directory levels used to shard cache files.
    DEPTH = 3

    def __init__(self, root_directory=None):
        self._InitializeRootDirectory(root_directory)

    def Get(self, key):
        '''Return the cached string for key, or None when not cached.'''
        path = self._GetPath(key)
        if os.path.exists(path):
            # Close the handle explicitly rather than relying on the
            # interpreter's refcounting to do it.
            fp = open(path)
            try:
                return fp.read()
            finally:
                fp.close()
        else:
            return None

    def Set(self, key, data):
        '''Store data under key, replacing any previous entry atomically.'''
        path = self._GetPath(key)
        directory = os.path.dirname(path)
        if not os.path.exists(directory):
            os.makedirs(directory)
        if not os.path.isdir(directory):
            raise _FileCacheError('%s exists but is not a directory' % directory)
        # Validate the target path *before* creating the temp file, so a
        # rejected key cannot leak a stray temp file.
        if not path.startswith(self._root_directory):
            raise _FileCacheError('%s does not appear to live under %s' %
                                  (path, self._root_directory))
        # Create the temp file in the *destination* directory: os.rename
        # is only atomic -- and only guaranteed to succeed -- within a
        # single filesystem, and the default temp dir may be on another.
        temp_fd, temp_path = tempfile.mkstemp(dir=directory)
        temp_fp = os.fdopen(temp_fd, 'w')
        try:
            temp_fp.write(data)
        finally:
            temp_fp.close()
        if os.path.exists(path):
            os.remove(path)
        os.rename(temp_path, path)

    def Remove(self, key):
        '''Delete the entry for key, if present.'''
        path = self._GetPath(key)
        if not path.startswith(self._root_directory):
            raise _FileCacheError('%s does not appear to live under %s' %
                                  (path, self._root_directory))
        if os.path.exists(path):
            os.remove(path)

    def GetCachedTime(self, key):
        '''Return the mtime of the entry for key, or None when not cached.'''
        path = self._GetPath(key)
        if os.path.exists(path):
            return os.path.getmtime(path)
        else:
            return None

    def _GetUsername(self):
        '''Attempt to find the username in a cross-platform fashion.'''
        try:
            # os.getlogin may be missing (AttributeError) or fail without
            # a controlling terminal (IOError/OSError).
            return os.getenv('USER') or \
                   os.getenv('LOGNAME') or \
                   os.getenv('USERNAME') or \
                   os.getlogin() or \
                   'nobody'
        except (AttributeError, IOError, OSError):
            return 'nobody'

    def _GetTmpCachePath(self):
        '''Default cache root: a per-user directory under the system tmpdir.'''
        cache_directory = 'python.cache_' + self._GetUsername()
        return os.path.join(tempfile.gettempdir(), cache_directory)

    def _InitializeRootDirectory(self, root_directory):
        # Resolve the cache root, creating it (one level) when absent and
        # verifying that an existing path really is a directory.
        if not root_directory:
            root_directory = self._GetTmpCachePath()
        root_directory = os.path.abspath(root_directory)
        if not os.path.exists(root_directory):
            os.mkdir(root_directory)
        if not os.path.isdir(root_directory):
            raise _FileCacheError('%s exists but is not a directory' %
                                  root_directory)
        self._root_directory = root_directory

    def _GetPath(self, key):
        '''Map a cache key to its absolute file path.'''
        try:
            hashed_key = md5(key).hexdigest()
        except TypeError:
            # Fall back to the pre-hashlib 'md5' module interface.
            hashed_key = md5.new(key).hexdigest()
        return os.path.join(self._root_directory,
                            self._GetPrefix(hashed_key),
                            hashed_key)

    def _GetPrefix(self, hashed_key):
        # e.g. 'a/b/c' for a digest beginning 'abc...' when DEPTH == 3.
        return os.path.sep.join(hashed_key[0:_FileCache.DEPTH])
| Python |
#!/usr/bin/python2.4
# -*- coding: utf-8 -*-#
#
# Copyright 2007 The Python-Twitter Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Unit tests for the twitter.py library'''
__author__ = 'python-twitter@googlegroups.com'
import os
import simplejson
import time
import calendar
import unittest
import urllib
import twitter
class StatusTest(unittest.TestCase):
    # Canonical JSON payload for one status (with embedded user);
    # AsJsonString of the sample status must reproduce it byte-for-byte.
    SAMPLE_JSON = '''{"created_at": "Fri Jan 26 23:17:14 +0000 2007", "id": 4391023, "text": "A l\u00e9gp\u00e1rn\u00e1s haj\u00f3m tele van angoln\u00e1kkal.", "user": {"description": "Canvas. JC Penny. Three ninety-eight.", "id": 718443, "location": "Okinawa, Japan", "name": "Kesuke Miyagi", "profile_image_url": "https://twitter.com/system/user/profile_image/718443/normal/kesuke.png", "screen_name": "kesuke", "url": "https://twitter.com/kesuke"}}'''

    def _GetSampleUser(self):
        # The user embedded in SAMPLE_JSON.
        return twitter.User(id=718443,
                            name='Kesuke Miyagi',
                            screen_name='kesuke',
                            description=u'Canvas. JC Penny. Three ninety-eight.',
                            location='Okinawa, Japan',
                            url='https://twitter.com/kesuke',
                            profile_image_url='https://twitter.com/system/user/pro'
                                              'file_image/718443/normal/kesuke.pn'
                                              'g')

    def _GetSampleStatus(self):
        # A Status instance equivalent to SAMPLE_JSON.
        return twitter.Status(created_at='Fri Jan 26 23:17:14 +0000 2007',
                              id=4391023,
                              text=u'A légpárnás hajóm tele van angolnákkal.',
                              user=self._GetSampleUser())

    def testInit(self):
        '''Test the twitter.Status constructor'''
        # Constructing with every supported field must not raise.
        status = twitter.Status(created_at='Fri Jan 26 23:17:14 +0000 2007',
                                id=4391023,
                                text=u'A légpárnás hajóm tele van angolnákkal.',
                                user=self._GetSampleUser())

    def testGettersAndSetters(self):
        '''Test all of the twitter.Status getters and setters'''
        status = twitter.Status()
        status.SetId(4391023)
        self.assertEqual(4391023, status.GetId())
        # Expected UTC epoch seconds for the sample creation time.
        created_at = calendar.timegm((2007, 1, 26, 23, 17, 14, -1, -1, -1))
        status.SetCreatedAt('Fri Jan 26 23:17:14 +0000 2007')
        self.assertEqual('Fri Jan 26 23:17:14 +0000 2007', status.GetCreatedAt())
        self.assertEqual(created_at, status.GetCreatedAtInSeconds())
        # Pin "now" so the humanized relative time is deterministic.
        status.SetNow(created_at + 10)
        self.assertEqual("about 10 seconds ago", status.GetRelativeCreatedAt())
        status.SetText(u'A légpárnás hajóm tele van angolnákkal.')
        self.assertEqual(u'A légpárnás hajóm tele van angolnákkal.',
                         status.GetText())
        status.SetUser(self._GetSampleUser())
        self.assertEqual(718443, status.GetUser().id)

    def testProperties(self):
        '''Test all of the twitter.Status properties'''
        status = twitter.Status()
        status.id = 1
        self.assertEqual(1, status.id)
        created_at = calendar.timegm((2007, 1, 26, 23, 17, 14, -1, -1, -1))
        status.created_at = 'Fri Jan 26 23:17:14 +0000 2007'
        self.assertEqual('Fri Jan 26 23:17:14 +0000 2007', status.created_at)
        self.assertEqual(created_at, status.created_at_in_seconds)
        status.now = created_at + 10
        self.assertEqual('about 10 seconds ago', status.relative_created_at)
        status.user = self._GetSampleUser()
        self.assertEqual(718443, status.user.id)

    def _ParseDate(self, string):
        # Helper: parse 'Mon DD HH:MM:SS YYYY' into UTC epoch seconds.
        return calendar.timegm(time.strptime(string, '%b %d %H:%M:%S %Y'))

    def testRelativeCreatedAt(self):
        '''Test various permutations of Status relative_created_at'''
        status = twitter.Status(created_at='Fri Jan 01 12:00:00 +0000 2007')
        # Walk "now" forward and check each humanized bucket boundary.
        status.now = self._ParseDate('Jan 01 12:00:00 2007')
        self.assertEqual('about a second ago', status.relative_created_at)
        status.now = self._ParseDate('Jan 01 12:00:01 2007')
        self.assertEqual('about a second ago', status.relative_created_at)
        status.now = self._ParseDate('Jan 01 12:00:02 2007')
        self.assertEqual('about 2 seconds ago', status.relative_created_at)
        status.now = self._ParseDate('Jan 01 12:00:05 2007')
        self.assertEqual('about 5 seconds ago', status.relative_created_at)
        status.now = self._ParseDate('Jan 01 12:00:50 2007')
        self.assertEqual('about a minute ago', status.relative_created_at)
        status.now = self._ParseDate('Jan 01 12:01:00 2007')
        self.assertEqual('about a minute ago', status.relative_created_at)
        status.now = self._ParseDate('Jan 01 12:01:10 2007')
        self.assertEqual('about a minute ago', status.relative_created_at)
        status.now = self._ParseDate('Jan 01 12:02:00 2007')
        self.assertEqual('about 2 minutes ago', status.relative_created_at)
        status.now = self._ParseDate('Jan 01 12:31:50 2007')
        self.assertEqual('about 31 minutes ago', status.relative_created_at)
        status.now = self._ParseDate('Jan 01 12:50:00 2007')
        self.assertEqual('about an hour ago', status.relative_created_at)
        status.now = self._ParseDate('Jan 01 13:00:00 2007')
        self.assertEqual('about an hour ago', status.relative_created_at)
        status.now = self._ParseDate('Jan 01 13:10:00 2007')
        self.assertEqual('about an hour ago', status.relative_created_at)
        status.now = self._ParseDate('Jan 01 14:00:00 2007')
        self.assertEqual('about 2 hours ago', status.relative_created_at)
        status.now = self._ParseDate('Jan 01 19:00:00 2007')
        self.assertEqual('about 7 hours ago', status.relative_created_at)
        status.now = self._ParseDate('Jan 02 11:30:00 2007')
        self.assertEqual('about a day ago', status.relative_created_at)
        status.now = self._ParseDate('Jan 04 12:00:00 2007')
        self.assertEqual('about 3 days ago', status.relative_created_at)
        status.now = self._ParseDate('Feb 04 12:00:00 2007')
        self.assertEqual('about 34 days ago', status.relative_created_at)

    def testAsJsonString(self):
        '''Test the twitter.Status AsJsonString method'''
        # Serialization of the sample must match SAMPLE_JSON exactly.
        self.assertEqual(StatusTest.SAMPLE_JSON,
                         self._GetSampleStatus().AsJsonString())

    def testAsDict(self):
        '''Test the twitter.Status AsDict method'''
        status = self._GetSampleStatus()
        data = status.AsDict()
        self.assertEqual(4391023, data['id'])
        self.assertEqual('Fri Jan 26 23:17:14 +0000 2007', data['created_at'])
        self.assertEqual(u'A légpárnás hajóm tele van angolnákkal.', data['text'])
        # The embedded user is recursively converted to a dict.
        self.assertEqual(718443, data['user']['id'])

    def testEq(self):
        '''Test the twitter.Status __eq__ method'''
        status = twitter.Status()
        status.created_at = 'Fri Jan 26 23:17:14 +0000 2007'
        status.id = 4391023
        status.text = u'A légpárnás hajóm tele van angolnákkal.'
        status.user = self._GetSampleUser()
        self.assertEqual(status, self._GetSampleStatus())

    def testNewFromJsonDict(self):
        '''Test the twitter.Status NewFromJsonDict method'''
        data = simplejson.loads(StatusTest.SAMPLE_JSON)
        status = twitter.Status.NewFromJsonDict(data)
        self.assertEqual(self._GetSampleStatus(), status)
class UserTest(unittest.TestCase):
    # Canonical JSON payload for one user (with embedded status);
    # AsJsonString of the sample user must reproduce it byte-for-byte.
    SAMPLE_JSON = '''{"description": "Indeterminate things", "id": 673483, "location": "San Francisco, CA", "name": "DeWitt", "profile_image_url": "https://twitter.com/system/user/profile_image/673483/normal/me.jpg", "screen_name": "dewitt", "status": {"created_at": "Fri Jan 26 17:28:19 +0000 2007", "id": 4212713, "text": "\\"Select all\\" and archive your Gmail inbox. The page loads so much faster!"}, "url": "http://unto.net/"}'''

    def _GetSampleStatus(self):
        # The status embedded in SAMPLE_JSON.
        return twitter.Status(created_at='Fri Jan 26 17:28:19 +0000 2007',
                              id=4212713,
                              text='"Select all" and archive your Gmail inbox. '
                                   ' The page loads so much faster!')

    def _GetSampleUser(self):
        # A User instance equivalent to SAMPLE_JSON.
        return twitter.User(id=673483,
                            name='DeWitt',
                            screen_name='dewitt',
                            description=u'Indeterminate things',
                            location='San Francisco, CA',
                            url='http://unto.net/',
                            profile_image_url='https://twitter.com/system/user/prof'
                                              'ile_image/673483/normal/me.jpg',
                            status=self._GetSampleStatus())

    def testInit(self):
        '''Test the twitter.User constructor'''
        # Constructing with the supported fields must not raise.
        user = twitter.User(id=673483,
                            name='DeWitt',
                            screen_name='dewitt',
                            description=u'Indeterminate things',
                            url='https://twitter.com/dewitt',
                            profile_image_url='https://twitter.com/system/user/prof'
                                              'ile_image/673483/normal/me.jpg',
                            status=self._GetSampleStatus())

    def testGettersAndSetters(self):
        '''Test all of the twitter.User getters and setters'''
        user = twitter.User()
        user.SetId(673483)
        self.assertEqual(673483, user.GetId())
        user.SetName('DeWitt')
        self.assertEqual('DeWitt', user.GetName())
        user.SetScreenName('dewitt')
        self.assertEqual('dewitt', user.GetScreenName())
        user.SetDescription('Indeterminate things')
        self.assertEqual('Indeterminate things', user.GetDescription())
        user.SetLocation('San Francisco, CA')
        self.assertEqual('San Francisco, CA', user.GetLocation())
        user.SetProfileImageUrl('https://twitter.com/system/user/profile_im'
                                'age/673483/normal/me.jpg')
        self.assertEqual('https://twitter.com/system/user/profile_image/673'
                         '483/normal/me.jpg', user.GetProfileImageUrl())
        user.SetStatus(self._GetSampleStatus())
        self.assertEqual(4212713, user.GetStatus().id)

    def testProperties(self):
        '''Test all of the twitter.User properties'''
        user = twitter.User()
        user.id = 673483
        self.assertEqual(673483, user.id)
        user.name = 'DeWitt'
        self.assertEqual('DeWitt', user.name)
        user.screen_name = 'dewitt'
        self.assertEqual('dewitt', user.screen_name)
        user.description = 'Indeterminate things'
        self.assertEqual('Indeterminate things', user.description)
        user.location = 'San Francisco, CA'
        self.assertEqual('San Francisco, CA', user.location)
        user.profile_image_url = 'https://twitter.com/system/user/profile_i' \
                                 'mage/673483/normal/me.jpg'
        self.assertEqual('https://twitter.com/system/user/profile_image/6734'
                         '83/normal/me.jpg', user.profile_image_url)
        # Fixed: previously this assigned to self.status (an attribute of
        # the TestCase) and asserted on that, so the User.status property
        # was never exercised at all.
        user.status = self._GetSampleStatus()
        self.assertEqual(4212713, user.status.id)

    def testAsJsonString(self):
        '''Test the twitter.User AsJsonString method'''
        self.assertEqual(UserTest.SAMPLE_JSON,
                         self._GetSampleUser().AsJsonString())

    def testAsDict(self):
        '''Test the twitter.User AsDict method'''
        user = self._GetSampleUser()
        data = user.AsDict()
        self.assertEqual(673483, data['id'])
        self.assertEqual('DeWitt', data['name'])
        self.assertEqual('dewitt', data['screen_name'])
        self.assertEqual('Indeterminate things', data['description'])
        self.assertEqual('San Francisco, CA', data['location'])
        self.assertEqual('https://twitter.com/system/user/profile_image/6734'
                         '83/normal/me.jpg', data['profile_image_url'])
        self.assertEqual('http://unto.net/', data['url'])
        # The embedded status is recursively converted to a dict.
        self.assertEqual(4212713, data['status']['id'])

    def testEq(self):
        '''Test the twitter.User __eq__ method'''
        user = twitter.User()
        user.id = 673483
        user.name = 'DeWitt'
        user.screen_name = 'dewitt'
        user.description = 'Indeterminate things'
        user.location = 'San Francisco, CA'
        user.profile_image_url = 'https://twitter.com/system/user/profile_image/67' \
                                 '3483/normal/me.jpg'
        user.url = 'http://unto.net/'
        user.status = self._GetSampleStatus()
        self.assertEqual(user, self._GetSampleUser())

    def testNewFromJsonDict(self):
        '''Test the twitter.User NewFromJsonDict method'''
        data = simplejson.loads(UserTest.SAMPLE_JSON)
        user = twitter.User.NewFromJsonDict(data)
        self.assertEqual(self._GetSampleUser(), user)
class TrendTest(unittest.TestCase):
    # Canonical JSON for one trend (the timestamp arrives out-of-band).
    SAMPLE_JSON = '''{"name": "Kesuke Miyagi", "query": "Kesuke Miyagi"}'''

    def _GetSampleTrend(self):
        # Reference Trend matching SAMPLE_JSON plus a fixed timestamp.
        return twitter.Trend(name='Kesuke Miyagi',
                             query='Kesuke Miyagi',
                             timestamp='Fri Jan 26 23:17:14 +0000 2007')

    def testInit(self):
        '''Test the twitter.Trend constructor'''
        # Constructing with every field must not raise.
        twitter.Trend(name='Kesuke Miyagi',
                      query='Kesuke Miyagi',
                      timestamp='Fri Jan 26 23:17:14 +0000 2007')

    def testProperties(self):
        '''Test all of the twitter.Trend properties'''
        sample = twitter.Trend()
        sample.name = 'Kesuke Miyagi'
        self.assertEqual('Kesuke Miyagi', sample.name)
        sample.query = 'Kesuke Miyagi'
        self.assertEqual('Kesuke Miyagi', sample.query)
        sample.timestamp = 'Fri Jan 26 23:17:14 +0000 2007'
        self.assertEqual('Fri Jan 26 23:17:14 +0000 2007', sample.timestamp)

    def testNewFromJsonDict(self):
        '''Test the twitter.Trend NewFromJsonDict method'''
        parsed = simplejson.loads(TrendTest.SAMPLE_JSON)
        built = twitter.Trend.NewFromJsonDict(parsed, timestamp='Fri Jan 26 23:17:14 +0000 2007')
        self.assertEqual(self._GetSampleTrend(), built)

    def testEq(self):
        '''Test the twitter.Trend __eq__ method'''
        other = twitter.Trend()
        other.name = 'Kesuke Miyagi'
        other.query = 'Kesuke Miyagi'
        other.timestamp = 'Fri Jan 26 23:17:14 +0000 2007'
        self.assertEqual(other, self._GetSampleTrend())
class FileCacheTest(unittest.TestCase):
    '''Exercises twitter._FileCache against the real filesystem.'''

    def testInit(self):
        """Test the twitter._FileCache constructor"""
        self.assert_(twitter._FileCache() is not None, 'cache is None')

    def testSet(self):
        """Test the twitter._FileCache.Set method"""
        cache = twitter._FileCache()
        cache.Set("foo", 'Hello World!')
        cache.Remove("foo")

    def testRemove(self):
        """Test the twitter._FileCache.Remove method"""
        cache = twitter._FileCache()
        cache.Set("foo", 'Hello World!')
        cache.Remove("foo")
        # After removal a Get must miss.
        self.assertEqual(cache.Get("foo"), None, 'data is not None')

    def testGet(self):
        """Test the twitter._FileCache.Get method"""
        cache = twitter._FileCache()
        cache.Set("foo", 'Hello World!')
        self.assertEqual('Hello World!', cache.Get("foo"))
        cache.Remove("foo")

    def testGetCachedTime(self):
        """Test the twitter._FileCache.GetCachedTime method"""
        start = time.time()
        cache = twitter._FileCache()
        cache.Set("foo", 'Hello World!')
        # The entry's mtime should be within a second of "now".
        age = cache.GetCachedTime("foo") - start
        self.assert_(age <= 1,
                     'Cached time differs from clock time by more than 1 second.')
        cache.Remove("foo")
class ApiTest(unittest.TestCase):
def setUp(self):
    # Route all HTTP traffic through a mock urllib so no network is hit.
    self._urllib = MockUrllib()
    self._api = twitter.Api(consumer_key='CONSUMER_KEY',
                            consumer_secret='CONSUMER_SECRET',
                            access_token_key='OAUTH_TOKEN',
                            access_token_secret='OAUTH_SECRET',
                            cache=None)
    self._api.SetUrllib(self._urllib)
def testTwitterError(self):
'''Test that twitter responses containing an error message are wrapped.'''
self._AddHandler('https://api.twitter.com/1/statuses/public_timeline.json',
curry(self._OpenTestData, 'public_timeline_error.json'))
# Manually try/catch so we can check the exception's value
try:
statuses = self._api.GetPublicTimeline()
except twitter.TwitterError, error:
# If the error message matches, the test passes
self.assertEqual('test error', error.message)
else:
self.fail('TwitterError expected')
def testGetPublicTimeline(self):
    '''Test the twitter.Api GetPublicTimeline method'''
    self._AddHandler('https://api.twitter.com/1/statuses/public_timeline.json?since_id=12345',
                     curry(self._OpenTestData, 'public_timeline.json'))
    timeline = self._api.GetPublicTimeline(since_id=12345)
    # Spot-check the page size and the first status id rather than the
    # full payload.
    self.assertEqual(20, len(timeline))
    self.assertEqual(89497702, timeline[0].id)
def testGetUserTimeline(self):
    '''Test the twitter.Api GetUserTimeline method'''
    self._AddHandler('https://api.twitter.com/1/statuses/user_timeline/kesuke.json?count=1',
                     curry(self._OpenTestData, 'user_timeline-kesuke.json'))
    timeline = self._api.GetUserTimeline('kesuke', count=1)
    # Spot-check one status and its author rather than the full payload.
    self.assertEqual(89512102, timeline[0].id)
    self.assertEqual(718443, timeline[0].user.id)
def testGetFriendsTimeline(self):
'''Test the twitter.Api GetFriendsTimeline method'''
self._AddHandler('https://api.twitter.com/1/statuses/friends_timeline/kesuke.json',
curry(self._OpenTestData, 'friends_timeline-kesuke.json'))
statuses = self._api.GetFriendsTimeline('kesuke')
# This is rather arbitrary, but spot checking is better than nothing
self.assertEqual(20, len(statuses))
self.assertEqual(718443, statuses[0].user.id)
def testGetStatus(self):
'''Test the twitter.Api GetStatus method'''
self._AddHandler('https://api.twitter.com/1/statuses/show/89512102.json',
curry(self._OpenTestData, 'show-89512102.json'))
status = self._api.GetStatus(89512102)
self.assertEqual(89512102, status.id)
self.assertEqual(718443, status.user.id)
def testDestroyStatus(self):
'''Test the twitter.Api DestroyStatus method'''
self._AddHandler('https://api.twitter.com/1/statuses/destroy/103208352.json',
curry(self._OpenTestData, 'status-destroy.json'))
status = self._api.DestroyStatus(103208352)
self.assertEqual(103208352, status.id)
def testPostUpdate(self):
'''Test the twitter.Api PostUpdate method'''
self._AddHandler('https://api.twitter.com/1/statuses/update.json',
curry(self._OpenTestData, 'update.json'))
status = self._api.PostUpdate(u'Моё судно на воздушной подушке полно угрей'.encode('utf8'))
# This is rather arbitrary, but spot checking is better than nothing
self.assertEqual(u'Моё судно на воздушной подушке полно угрей', status.text)
def testGetReplies(self):
'''Test the twitter.Api GetReplies method'''
self._AddHandler('https://api.twitter.com/1/statuses/replies.json?page=1',
curry(self._OpenTestData, 'replies.json'))
statuses = self._api.GetReplies(page=1)
self.assertEqual(36657062, statuses[0].id)
def testGetFriends(self):
'''Test the twitter.Api GetFriends method'''
self._AddHandler('https://api.twitter.com/1/statuses/friends.json?cursor=123',
curry(self._OpenTestData, 'friends.json'))
users = self._api.GetFriends(cursor=123)
buzz = [u.status for u in users if u.screen_name == 'buzz']
self.assertEqual(89543882, buzz[0].id)
def testGetFollowers(self):
'''Test the twitter.Api GetFollowers method'''
self._AddHandler('https://api.twitter.com/1/statuses/followers.json?page=1',
curry(self._OpenTestData, 'followers.json'))
users = self._api.GetFollowers(page=1)
# This is rather arbitrary, but spot checking is better than nothing
alexkingorg = [u.status for u in users if u.screen_name == 'alexkingorg']
self.assertEqual(89554432, alexkingorg[0].id)
def testGetFeatured(self):
'''Test the twitter.Api GetFeatured method'''
self._AddHandler('https://api.twitter.com/1/statuses/featured.json',
curry(self._OpenTestData, 'featured.json'))
users = self._api.GetFeatured()
# This is rather arbitrary, but spot checking is better than nothing
stevenwright = [u.status for u in users if u.screen_name == 'stevenwright']
self.assertEqual(86991742, stevenwright[0].id)
def testGetDirectMessages(self):
'''Test the twitter.Api GetDirectMessages method'''
self._AddHandler('https://api.twitter.com/1/direct_messages.json?page=1',
curry(self._OpenTestData, 'direct_messages.json'))
statuses = self._api.GetDirectMessages(page=1)
self.assertEqual(u'A légpárnás hajóm tele van angolnákkal.', statuses[0].text)
def testPostDirectMessage(self):
'''Test the twitter.Api PostDirectMessage method'''
self._AddHandler('https://api.twitter.com/1/direct_messages/new.json',
curry(self._OpenTestData, 'direct_messages-new.json'))
status = self._api.PostDirectMessage('test', u'Моё судно на воздушной подушке полно угрей'.encode('utf8'))
# This is rather arbitrary, but spot checking is better than nothing
self.assertEqual(u'Моё судно на воздушной подушке полно угрей', status.text)
def testDestroyDirectMessage(self):
'''Test the twitter.Api DestroyDirectMessage method'''
self._AddHandler('https://api.twitter.com/1/direct_messages/destroy/3496342.json',
curry(self._OpenTestData, 'direct_message-destroy.json'))
status = self._api.DestroyDirectMessage(3496342)
# This is rather arbitrary, but spot checking is better than nothing
self.assertEqual(673483, status.sender_id)
def testCreateFriendship(self):
'''Test the twitter.Api CreateFriendship method'''
self._AddHandler('https://api.twitter.com/1/friendships/create/dewitt.json',
curry(self._OpenTestData, 'friendship-create.json'))
user = self._api.CreateFriendship('dewitt')
# This is rather arbitrary, but spot checking is better than nothing
self.assertEqual(673483, user.id)
def testDestroyFriendship(self):
'''Test the twitter.Api DestroyFriendship method'''
self._AddHandler('https://api.twitter.com/1/friendships/destroy/dewitt.json',
curry(self._OpenTestData, 'friendship-destroy.json'))
user = self._api.DestroyFriendship('dewitt')
# This is rather arbitrary, but spot checking is better than nothing
self.assertEqual(673483, user.id)
def testGetUser(self):
'''Test the twitter.Api GetUser method'''
self._AddHandler('https://api.twitter.com/1/users/show/dewitt.json',
curry(self._OpenTestData, 'show-dewitt.json'))
user = self._api.GetUser('dewitt')
self.assertEqual('dewitt', user.screen_name)
self.assertEqual(89586072, user.status.id)
def _AddHandler(self, url, callback):
self._urllib.AddHandler(url, callback)
def _GetTestDataPath(self, filename):
directory = os.path.dirname(os.path.abspath(__file__))
test_data_dir = os.path.join(directory, 'testdata')
return os.path.join(test_data_dir, filename)
def _OpenTestData(self, filename):
f = open(self._GetTestDataPath(filename))
# make sure that the returned object contains an .info() method:
# headers are set to {}
return urllib.addinfo(f, {})
class MockUrllib(object):
    """Drop-in stand-in for the urllib module with canned responses.

    Only the handful of names twitter.Api touches are provided; everything
    else is deliberately absent so unexpected use fails loudly.
    """

    def __init__(self):
        self._handlers = dict()
        self.HTTPBasicAuthHandler = MockHTTPBasicAuthHandler

    def AddHandler(self, url, callback):
        """Map an exact *url* to a zero-argument response factory."""
        self._handlers[url] = callback

    def build_opener(self, *handlers):
        """Ignore *handlers* and return an opener over the canned map."""
        return MockOpener(self._handlers)

    def HTTPHandler(self, *args, **kwargs):
        # The real handler objects are never inspected by the code under
        # test, so a placeholder is sufficient.
        return None

    def HTTPSHandler(self, *args, **kwargs):
        return None

    def OpenerDirector(self):
        return self.build_opener()
class MockOpener(object):
    """A single-shot opener that serves canned responses by exact URL."""

    def __init__(self, handlers):
        self._handlers = handlers
        self._opened = False

    def open(self, url, data=None):
        """Return the canned response for *url*, stripping oauth params."""
        if self._opened:
            raise Exception('MockOpener already opened.')
        # Remove parameters from URL - they're only added by oauth and we
        # don't want to test oauth
        if '?' in url:
            # We split using & and filter on the beginning of each key
            # This is crude but we have to keep the ordering for now
            base, query = url.split('?')
            kept = [tok for tok in query.split('&')
                    if not tok.startswith('oauth')]
            url = "%s?%s" % (base, '&'.join(kept)) if kept else base
        if url not in self._handlers:
            raise Exception('Unexpected URL %s (Checked: %s)' % (url, self._handlers))
        self._opened = True
        return self._handlers[url]()

    def add_handler(self, *args, **kwargs):
        # Accepted for API compatibility; nothing to do.
        pass

    def close(self):
        if not self._opened:
            raise Exception('MockOpener closed before it was opened.')
        self._opened = False
class MockHTTPBasicAuthHandler(object):
    """Stand-in for urllib's HTTPBasicAuthHandler used by MockUrllib."""

    def add_password(self, realm, uri, user, passwd):
        """Accept credentials without storing or checking them.

        TODO(dewitt): Add verification that the proper args are passed.
        """
        return None
class curry:
    # http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52549
    """Partial application: pre-bind positional and keyword arguments.

    Call-time keyword arguments override the pre-bound ones.
    """

    def __init__(self, fun, *args, **kwargs):
        self.fun = fun
        self.pending = args[:]
        self.kwargs = kwargs.copy()

    def __call__(self, *args, **kwargs):
        if kwargs and self.kwargs:
            # Both sides supplied keywords: merge, call-time wins.
            merged = dict(self.kwargs)
            merged.update(kwargs)
        else:
            merged = kwargs or self.kwargs
        return self.fun(*(self.pending + args), **merged)
def suite():
    """Assemble every test case in this module into one TestSuite."""
    tests = unittest.TestSuite()
    for case in (FileCacheTest, StatusTest, UserTest, ApiTest):
        tests.addTests(unittest.makeSuite(case))
    return tests
if __name__ == '__main__':
    # Allow running this test module directly from the command line.
    unittest.main()
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Alexander Shorin
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
from astm.version import __version__
try:
from setuptools import setup, find_packages
except ImportError:
from distutils.core import setup
# http://wiki.python.org/moin/Distutils/Cookbook/AutoPackageDiscovery
import os
def is_package(path):
return (
os.path.isdir(path) and
os.path.isfile(os.path.join(path, '__init__.py'))
)
def find_packages(path='.', base=""):
""" Find all packages in path """
packages = {}
for item in os.listdir(path):
dir = os.path.join(path, item)
if is_package(dir):
if base:
module_name = "%(base)s.%(item)s" % vars()
else:
module_name = item
packages[module_name] = dir
packages.update(find_packages(dir, module_name))
return packages
# Package metadata.  ``__version__`` comes from astm/version.py (imported at
# the top of this file); ``find_packages`` is either setuptools' or the
# distutils fallback defined above.
setup(
    name = 'astm',
    version = __version__,
    description = 'Python implementation of ASTM E1381/1394 protocol.',
    # NOTE(review): the README handle is never closed explicitly; harmless in
    # a short-lived setup run, but a context manager would be tidier.
    long_description = open('README').read(),
    author = 'Alexander Shorin',
    author_email = 'kxepal@gmail.com',
    license = 'BSD',
    url = 'http://code.google.com/p/python-astm',
    install_requires = [],
    test_suite = 'astm.tests',
    zip_safe = True,
    classifiers = [
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.2',
        'Programming Language :: Python :: 3.3',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Topic :: Scientific/Engineering :: Medical Science Apps.'
    ],
    packages = find_packages(),
)
| Python |
# -*- coding: utf-8 -*-
#
# Author: Sam Rushing <rushing@nightmare.com>
#
# Copyright (C) 2013 Alexander Shorin
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
"""
.. module:: astm.asynclib
:synopsis: Forked version of asyncore mixed with asynchat.
.. moduleauthor:: Sam Rushing <rushing@nightmare.com>
.. sectionauthor:: Christopher Petrilli <petrilli@amber.org>
.. sectionauthor:: Steve Holden <sholden@holdenweb.com>
.. heavily adapted from original documentation by Sam Rushing
"""
import heapq
import logging
import os
import select
import socket
import sys
import time
from collections import deque
from errno import (
EALREADY, EINPROGRESS, EWOULDBLOCK, ECONNRESET, EINVAL,
ENOTCONN, ESHUTDOWN, EINTR, EISCONN, EBADF, ECONNABORTED, EPIPE, EAGAIN,
errorcode
)
from .compat import long, b, bytes, buffer
class ExitNow(Exception):
    """Raised inside an event handler to abort the polling loop immediately.

    Always re-raised by the dispatch helpers instead of being routed to
    ``handle_error`` (see ``_RERAISEABLE_EXC`` below).
    """
    pass
# errno values that indicate the peer has gone away; treated as a normal
# close rather than an error throughout this module.
_DISCONNECTED = frozenset((ECONNRESET, ENOTCONN, ESHUTDOWN, ECONNABORTED, EPIPE,
                           EBADF))
# Exceptions that must propagate out of event handlers instead of being
# swallowed and routed to handle_error().
_RERAISEABLE_EXC = (ExitNow, KeyboardInterrupt, SystemExit)
# Global fd -> dispatcher map, used by poll()/loop() when no map is given.
_SOCKET_MAP = {}
# Global heap of pending call_later instances, ordered by deadline.
_SCHEDULED_TASKS = []
log = logging.getLogger(__name__)
def _strerror(err):
try:
return os.strerror(err)
except (ValueError, OverflowError, NameError):
if err in errorcode:
return errorcode[err]
return "Unknown error %s" % err
def read(obj):
    """Trigger ``handle_read_event`` on *obj*.

    Loop-control exceptions propagate; any other failure is routed to the
    object's ``handle_error`` hook.
    """
    try:
        obj.handle_read_event()
    except _RERAISEABLE_EXC:
        raise
    except Exception:
        obj.handle_error()
def write(obj):
    """Trigger ``handle_write_event`` on *obj*.

    Loop-control exceptions propagate; any other failure is routed to the
    object's ``handle_error`` hook.
    """
    try:
        obj.handle_write_event()
    except _RERAISEABLE_EXC:
        raise
    except Exception:
        obj.handle_error()
def exception(obj):
    """Trigger ``handle_exception_event`` on *obj*.

    Loop-control exceptions propagate; any other failure is routed to the
    object's ``handle_error`` hook.
    """
    try:
        obj.handle_exception_event()
    except _RERAISEABLE_EXC:
        raise
    except Exception:
        obj.handle_error()
def readwrite(obj, flags):
    """Dispatch poll()-style event *flags* to the matching handlers on *obj*.

    Disconnection errnos are treated as a normal close; anything else goes
    through ``handle_error``.  Loop-control exceptions propagate.
    """
    try:
        if flags & select.POLLIN:
            obj.handle_read_event()
        if flags & select.POLLOUT:
            obj.handle_write_event()
        if flags & select.POLLPRI:
            obj.handle_exception_event()
        if flags & (select.POLLHUP | select.POLLERR | select.POLLNVAL):
            obj.handle_close()
    except socket.error as e:
        if e.args[0] in _DISCONNECTED:
            obj.handle_close()
        else:
            obj.handle_error()
    except _RERAISEABLE_EXC:
        raise
    except Exception:
        obj.handle_error()
def poll(timeout=0.0, map=None):
    """Run a single select() pass over *map* (default: the global map)."""
    if map is None:
        map = _SOCKET_MAP
    if not map:
        return
    r, w, e = [], [], []
    for fd, obj in map.items():
        is_r = obj.readable()
        is_w = obj.writable()
        if is_r:
            r.append(fd)
        # accepting sockets should not be writable
        if is_w and not obj.accepting:
            w.append(fd)
        # note: exception interest follows raw writable(), even for
        # accepting sockets excluded from the write list above
        if is_r or is_w:
            e.append(fd)
    if not (r or w or e):
        # Nothing to wait on: just honour the timeout.
        time.sleep(timeout)
        return
    try:
        r, w, e = select.select(r, w, e, timeout)
    except select.error as err:
        if err.args[0] != EINTR:
            raise
        return
    # Dispatch in the same order as the legacy asyncore loop: reads,
    # then writes, then exceptional conditions.
    for ready, dispatch in ((r, read), (w, write), (e, exception)):
        for fd in ready:
            obj = map.get(fd)
            if obj is None:
                continue
            dispatch(obj)
def scheduler(tasks=None):
    """Run every scheduled task whose deadline has already passed."""
    if tasks is None:
        tasks = _SCHEDULED_TASKS
    now = time.time()
    while tasks and tasks[0].timeout <= now:
        task = heapq.heappop(tasks)
        if task.repush:
            # delay()/reset() moved the deadline since it was queued;
            # re-insert at the new position without firing it.
            heapq.heappush(tasks, task)
            task.repush = False
            continue
        try:
            task.call()
        finally:
            # One-shot semantics: drop the task unless it cancelled itself.
            if not task.cancelled:
                task.cancel()
def loop(timeout=30.0, map=None, tasks=None, count=None):
    """
    Enter a polling loop that terminates after *count* passes or all open
    channels have been closed and all tasks have fired.  All arguments are
    optional.  The *count* parameter defaults to ``None``, resulting in the
    loop terminating only when all channels have been closed.  The *timeout*
    argument sets the timeout parameter for the :func:`select.select` call
    inside :func:`poll`, measured in seconds; the default is 30 seconds.

    The *map* parameter is a dictionary whose items are the channels to watch.
    As channels are closed they are deleted from their map.  If *map* is
    omitted, a global map is used.  The *tasks* parameter is a heap of
    :class:`call_later` instances; it defaults to the global task list.
    """
    if map is None:
        map = _SOCKET_MAP
    if tasks is None:
        tasks = _SCHEDULED_TASKS
    # BUG FIX: this used to call ``scheduler()`` with no argument, so an
    # explicitly passed *tasks* list was tested in the loop condition but
    # never actually drained (the global list was run instead), which could
    # spin forever.  Pass the local list through.
    if count is None:
        while map or tasks:
            if map:
                poll(timeout, map)
            if tasks:
                scheduler(tasks)
    else:
        while (map or tasks) and count > 0:
            if map:
                poll(timeout, map)
            if tasks:
                scheduler(tasks)
            count -= 1
class call_later:
    """Calls a function at a later time.

    It can be used to asynchronously schedule a call within the polling
    loop without blocking it. The instance returned is an object that
    can be used to cancel or reschedule the call.
    """

    def __init__(self, seconds, target, *args, **kwargs):
        """
        - seconds: the number of seconds to wait
        - target: the callable object to call later
        - args: the arguments to call it with
        - kwargs: the keyword arguments to call it with
        - _tasks: a reserved keyword to specify a different list to
          store the delayed call instances.
        """
        assert callable(target), "%s is not callable" % target
        assert seconds >= 0, \
            "%s is not greater than or equal to 0 seconds" % (seconds)
        self.__delay = seconds
        self.__target = target
        self.__args = args
        # NOTE: this stores a reference (not a copy) to the same dict that
        # the pop() below mutates, so the reserved '_tasks' key is removed
        # from self.__kwargs as well and never reaches the target.
        self.__kwargs = kwargs
        self.__tasks = kwargs.pop('_tasks', _SCHEDULED_TASKS)
        # seconds from the epoch at which to call the function
        self.timeout = time.time() + self.__delay
        # repush: set by reset()/delay() so scheduler() re-sorts instead of
        # firing; cancelled: terminal state, the call will never run.
        self.repush = False
        self.cancelled = False
        heapq.heappush(self.__tasks, self)

    def __lt__(self, other):
        # NOTE(review): non-strict comparison (<=) for heap ordering; ties
        # between equal deadlines resolve arbitrarily either way.
        return self.timeout <= other.timeout

    def call(self):
        """Call this scheduled function."""
        assert not self.cancelled, "Already cancelled"
        self.__target(*self.__args, **self.__kwargs)

    def reset(self):
        """Reschedule this call resetting the current countdown."""
        assert not self.cancelled, "Already cancelled"
        self.timeout = time.time() + self.__delay
        self.repush = True

    def delay(self, seconds):
        """Reschedule this call for a later time."""
        assert not self.cancelled, "Already cancelled."
        assert seconds >= 0, \
            "%s is not greater than or equal to 0 seconds" % (seconds)
        self.__delay = seconds
        newtime = time.time() + self.__delay
        if newtime > self.timeout:
            # Moving later: mark for lazy re-insertion by scheduler().
            self.timeout = newtime
            self.repush = True
        else:
            # Moving earlier: restore the heap invariant immediately.
            # XXX - slow, can be improved
            self.timeout = newtime
            heapq.heapify(self.__tasks)

    def cancel(self):
        """Unschedule this call."""
        assert not self.cancelled, "Already cancelled"
        self.cancelled = True
        # Break reference cycles promptly.
        del self.__target, self.__args, self.__kwargs
        if self in self.__tasks:
            pos = self.__tasks.index(self)
            if pos == 0:
                heapq.heappop(self.__tasks)
            elif pos == len(self.__tasks) - 1:
                self.__tasks.pop(pos)
            else:
                # Replace with the last element, then sift to restore the
                # heap invariant.  NOTE: heapq._siftup is a private API.
                self.__tasks[pos] = self.__tasks.pop()
                heapq._siftup(self.__tasks, pos)
class Dispatcher(object):
    """
    The :class:`Dispatcher` class is a thin wrapper around a low-level socket
    object. To make it more useful, it has a few methods for event-handling
    which are called from the asynchronous loop. Otherwise, it can be treated
    as a normal non-blocking socket object.

    The firing of low-level events at certain times or in certain connection
    states tells the asynchronous loop that certain higher-level events have
    taken place. For example, if we have asked for a socket to connect to
    another host, we know that the connection has been made when the socket
    becomes writable for the first time (at this point you know that you may
    write to it with the expectation of success). The implied higher-level
    events are:

    +----------------------+----------------------------------------+
    | Event                | Description                            |
    +======================+========================================+
    | ``handle_connect()`` | Implied by the first read or write     |
    |                      | event                                  |
    +----------------------+----------------------------------------+
    | ``handle_close()``   | Implied by a read event with no data   |
    |                      | available                              |
    +----------------------+----------------------------------------+
    | ``handle_accept()``  | Implied by a read event on a listening |
    |                      | socket                                 |
    +----------------------+----------------------------------------+

    During asynchronous processing, each mapped channel's :meth:`readable` and
    :meth:`writable` methods are used to determine whether the channel's socket
    should be added to the list of channels :c:func:`select`\ ed or
    :c:func:`poll`\ ed for read and write events.
    """

    # Class-level defaults; instances mutate these as state changes.
    connected = False   # True once the socket is (believed) connected
    accepting = False   # True for listening sockets
    addr = None         # peer (or bound) address, when known

    def __init__(self, sock=None, map=None):
        """Wrap *sock* (if given) and register this channel in *map*."""
        if map is None:
            self._map = _SOCKET_MAP
        else:
            self._map = map
        self._fileno = None
        if sock:
            # Set to nonblocking just to make sure for cases where we
            # get a socket from a blocking source.
            sock.setblocking(0)
            self.set_socket(sock, map)
            self.connected = True
            # The constructor no longer requires that the socket
            # passed be connected.
            try:
                self.addr = sock.getpeername()
            except socket.error as err:
                if err.args[0] == ENOTCONN:
                    # To handle the case where we got an unconnected
                    # socket.
                    self.connected = False
                else:
                    # The socket is broken in some unknown way, alert
                    # the user and remove it from the map (to prevent
                    # polling of broken sockets).
                    self._del_channel(map)
                    raise
        else:
            self.socket = None

    def __repr__(self):
        # Condensed state summary, e.g. <module.Cls connected 1.2.3.4:80 at 0x..>
        status = [self.__class__.__module__ + '.' + self.__class__.__name__]
        if self.accepting and self.addr:
            status.append('listening')
        elif self.connected:
            status.append('connected')
        if self.addr is not None:
            try:
                status.append('%s:%d' % self.addr)
            except TypeError:
                # addr is not a (host, port) pair (e.g. a UNIX socket path).
                status.append(repr(self.addr))
        return '<%s at %#x>' % (' '.join(status), id(self))

    __str__ = __repr__

    def _add_channel(self, map=None):
        # Register our fd in the polling map so loop()/poll() see us.
        log.debug('Adding channel %s' % self)
        if map is None:
            map = self._map
        map[self._fileno] = self

    def _del_channel(self, map=None):
        # Remove our fd from the polling map (idempotent).
        fd = self._fileno
        if map is None:
            map = self._map
        if fd in map:
            log.debug('Closing channel %d:%s' % (fd, self))
            del map[fd]
        self._fileno = None

    def create_socket(self, family, type):
        """
        This is identical to the creation of a normal socket, and will use
        the same options for creation. Refer to the :mod:`socket` documentation
        for information on creating sockets.
        """
        self.family_and_type = family, type
        sock = socket.socket(family, type)
        sock.setblocking(0)
        self.set_socket(sock)

    def set_socket(self, sock, map=None):
        """Adopt *sock* as this channel's socket and register its fd."""
        self.socket = sock
        self._fileno = sock.fileno()
        self._add_channel(map)

    def set_reuse_addr(self):
        """Set SO_REUSEADDR, ignoring errors (e.g. on unsupported platforms)."""
        try:
            self.socket.setsockopt(
                socket.SOL_SOCKET, socket.SO_REUSEADDR,
                self.socket.getsockopt(socket.SOL_SOCKET,
                                       socket.SO_REUSEADDR) | 1
            )
        except socket.error:
            pass

    def readable(self):
        """
        Called each time around the asynchronous loop to determine whether a
        channel's socket should be added to the list on which read events can
        occur. The default method simply returns ``True``, indicating that by
        default, all channels will be interested in read events."""
        return True

    def writable(self):
        """
        Called each time around the asynchronous loop to determine whether a
        channel's socket should be added to the list on which write events can
        occur. The default method simply returns ``True``, indicating that by
        default, all channels will be interested in write events.
        """
        return True

    def listen(self, num):
        """Listen for connections made to the socket.

        The `num` argument specifies the maximum number of queued connections
        and should be at least 1; the maximum value is system-dependent
        (usually 5)."""
        self.accepting = True
        # Old Windows versions cap the backlog at 5.
        if os.name == 'nt' and num > 5:
            num = 5
        return self.socket.listen(num)

    def bind(self, address):
        """Bind the socket to `address`.

        The socket must not already be bound. The format of `address` depends
        on the address family --- refer to the :mod:`socket` documentation for
        more information. To mark the socket as re-usable (setting the
        :const:`SO_REUSEADDR` option), call the :class:`Dispatcher` object's
        :meth:`set_reuse_addr` method.
        """
        self.addr = address
        return self.socket.bind(address)

    def connect(self, address):
        """
        As with the normal socket object, `address` is a tuple with the first
        element the host to connect to, and the second the port number.
        """
        self.connected = False
        self.addr = address
        err = self.socket.connect_ex(address)
        # In-progress errnos mean the nonblocking connect will complete later
        # (signalled by the first write event); EINVAL is the Windows variant.
        if err in (EINPROGRESS, EALREADY, EWOULDBLOCK)\
        or err == EINVAL and os.name in ('nt', 'ce'):
            return
        if err in (0, EISCONN):
            self.handle_connect_event()
        else:
            raise socket.error(err, errorcode[err])

    def accept(self):
        """Accept a connection.

        The socket must be bound to an address and listening for connections.
        The return value can be either ``None`` or a pair ``(conn, address)``
        where `conn` is a *new* socket object usable to send and receive data on
        the connection, and *address* is the address bound to the socket on the
        other end of the connection.

        When ``None`` is returned it means the connection didn't take place, in
        which case the server should just ignore this event and keep listening
        for further incoming connections.
        """
        try:
            conn, addr = self.socket.accept()
        except TypeError:
            # Some platforms return None from accept() on a spurious wakeup.
            return None
        except socket.error as err:
            if err.args[0] in (EWOULDBLOCK, ECONNABORTED, EAGAIN):
                return None
            else:
                raise
        else:
            return conn, addr

    def send(self, data):
        """Send `data` to the remote end-point of the socket."""
        try:
            log.debug('[%s:%d] <<< %r', self.addr[0], self.addr[1], data)
            result = self.socket.send(data)
            return result
        except socket.error as err:
            if err.args[0] == EWOULDBLOCK:
                # Kernel buffer full: report zero bytes sent, try again later.
                return 0
            elif err.args[0] in _DISCONNECTED:
                self.handle_close()
                return 0
            else:
                raise

    def recv(self, buffer_size):
        """Read at most `buffer_size` bytes from the socket's remote end-point.

        An empty string implies that the channel has been closed from the other
        end.
        """
        try:
            data = self.socket.recv(buffer_size)
            log.debug('[%s:%d] >>> %r', self.addr[0], self.addr[1], data)
            if not data:
                # a closed connection is indicated by signaling
                # a read condition, and having recv() return 0.
                self.handle_close()
                return b''
            else:
                return data
        except socket.error as err:
            # winsock sometimes throws ENOTCONN
            if err.args[0] in _DISCONNECTED:
                self.handle_close()
                return b''
            else:
                raise

    def close(self):
        """Close the socket.

        All future operations on the socket object will fail.
        The remote end-point will receive no more data (after queued data is
        flushed). Sockets are automatically closed when they are
        garbage-collected.
        """
        self.connected = False
        self.accepting = False
        self._del_channel()
        try:
            self.socket.close()
        except socket.error as err:
            if err.args[0] not in (ENOTCONN, EBADF):
                raise

    def handle_read_event(self):
        """Route a low-level read event to the appropriate high-level hook."""
        if self.accepting:
            # accepting sockets are never connected, they "spawn" new
            # sockets that are connected
            self.handle_accept()
        elif not self.connected:
            # First readable event on an in-progress connect implies the
            # connection is established.
            self.handle_connect_event()
            self.handle_read()
        else:
            self.handle_read()

    def handle_connect_event(self):
        """Finalize a nonblocking connect; raises if the connect failed."""
        err = self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
        if err != 0:
            raise socket.error(err, _strerror(err))
        self.handle_connect()
        self.connected = True

    def handle_write_event(self):
        """Route a low-level write event to the appropriate high-level hook."""
        if self.accepting:
            # Accepting sockets shouldn't get a write event.
            # We will pretend it didn't happen.
            return
        if not self.connected:
            #check for errors
            err = self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
            if err != 0:
                raise socket.error(err, _strerror(err))
            self.handle_connect_event()
        self.handle_write()

    def handle_exception_event(self):
        # handle_exception_event() is called if there might be an error on the
        # socket, or if there is OOB data
        # check for the error condition first
        err = self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
        if err != 0:
            # we can get here when select.select() says that there is an
            # exceptional condition on the socket
            # since there is an error, we'll go ahead and close the socket
            # like we would in a subclassed handle_read() that received no
            # data
            self.handle_close()
        else:
            self.handle_exception()

    def handle_error(self):
        """
        Called when an exception is raised and not otherwise handled.
        The default version prints a condensed traceback.
        """
        try:
            self_repr = repr(self)
        except Exception:
            # repr() itself may fail on a half-initialized channel.
            self_repr = '<__repr__(self) failed for object at %0x>' % id(self)
        log.exception('Uncatched python exception, closing channel %s',
                      self_repr)
        self.handle_close()

    def handle_exception(self):
        """Default OOB/exceptional-condition hook: just log it."""
        log.exception('Unknown error')

    def handle_read(self):
        """Default read hook: log that the event went unhandled."""
        log.debug('Unhandled read event')

    def handle_write(self):
        """
        Called when the asynchronous loop detects that a writable socket can be
        written. Often this method will implement the necessary buffering for
        performance. For example::

            def handle_write(self):
                sent = self.send(self.buffer)
                self.buffer = self.buffer[sent:]
        """
        log.debug('Unhandled write event')

    def handle_connect(self):
        """
        Called when the active opener's socket actually makes a connection.
        Might send a "welcome" banner, or initiate a protocol negotiation with
        the remote endpoint, for example.
        """
        log.info('[%s:%d] Connection established', self.addr[0], self.addr[1])

    def handle_accept(self):
        """
        Called on listening channels (passive openers) when a connection can be
        established with a new remote endpoint that has issued a :meth:`connect`
        call for the local endpoint.
        """
        log.info('[%s:%d] Connection accepted', self.addr[0], self.addr[1])

    def handle_close(self):
        """Called when the socket is closed."""
        log.info('[%s:%d] Connection closed', self.addr[0], self.addr[1])
        self.close()
def close_all(map=None, tasks=None, ignore_all=False):
    """Close every channel in *map* and cancel every task in *tasks*.

    With *ignore_all* true, errors raised while closing or cancelling are
    swallowed (loop-control exceptions always propagate).  Both containers
    are emptied afterwards.
    """
    if map is None:
        map = _SOCKET_MAP
    if tasks is None:
        tasks = _SCHEDULED_TASKS
    for channel in list(map.values()):
        try:
            channel.close()
        except OSError as err:
            if err.args[0] == EBADF:
                pass  # already-closed fd: nothing to do
            elif not ignore_all:
                raise
        except _RERAISEABLE_EXC:
            raise
        except Exception:
            if not ignore_all:
                raise
    map.clear()
    for task in tasks:
        try:
            task.cancel()
        except _RERAISEABLE_EXC:
            raise
        except Exception:
            if not ignore_all:
                raise
    del tasks[:]
class AsyncChat(Dispatcher):
"""
This class is an abstract subclass of :class:`Dispatcher`. To make
practical use of the code you must subclass :class:`AsyncChat`, providing
meaningful meth:`found_terminator` method.
The :class:`Dispatcher` methods can be used, although not all make
sense in a message/response context.
Like :class:`Dispatcher`, :class:`AsyncChat` defines a set of
events that are generated by an analysis of socket conditions after a
:c:func:`select` call. Once the polling loop has been started the
:class:`AsyncChat` object's methods are called by the event-processing
framework with no action on the part of the programmer.
"""
# these are overridable defaults
#: The asynchronous input buffer size.
recv_buffer_size = 4096
#: The asynchronous output buffer size.
send_buffer_size = 4096
#: Encoding usage is not enabled by default, because that is a
#: sign of an application bug that we don't want to pass silently.
use_encoding = False
#: Default encoding.
encoding = 'latin-1'
#: Remove terminator from the result data.
strip_terminator = True
_terminator = None
def __init__(self, sock=None, map=None):
# for string terminator matching
self._input_buffer = b''
self.inbox = deque()
self.outbox = deque()
super(AsyncChat, self).__init__(sock, map)
self.collect_incoming_data = self.pull
self.initiate_send = self.flush
def pull(self, data):
"""Puts `data` into incoming queue. Also available by alias
`collect_incoming_data`.
"""
self.inbox.append(data)
def found_terminator(self):
"""
Called when the incoming data stream matches the :attr:`termination`
condition. The default method, which must be overridden, raises a
:exc:`NotImplementedError` exception. The buffered input data should be
available via an instance attribute.
"""
raise NotImplementedError("must be implemented in subclass")
def _set_terminator(self, term):
self._terminator = term
def _get_terminator(self):
return self._terminator
#: The input delimiter and the terminating condition to be recognized on the
#: channel. May be any of three types of value, corresponding to three
#: different ways to handle incoming protocol data.
#:
#: +-----------+---------------------------------------------+
#: | term | Description |
#: +===========+=============================================+
#: | *string* | Will call :meth:`found_terminator` when the |
#: | | string is found in the input stream |
#: +-----------+---------------------------------------------+
#: | *integer* | Will call :meth:`found_terminator` when the |
#: | | indicated number of characters have been |
#: | | received |
#: +-----------+---------------------------------------------+
#: | ``None`` | The channel continues to collect data |
#: | | forever |
#: +-----------+---------------------------------------------+
#:
#: Note that any data following the terminator will be available for reading
#: by the channel after :meth:`found_terminator` is called.
terminator = property(_get_terminator, _set_terminator)
def handle_read(self):
try:
data = self.recv(self.recv_buffer_size)
except socket.error as err:
self.handle_error()
return
if self.use_encoding and not isinstance():
data = data.decode(self.encoding)
self._input_buffer += data
while self._input_buffer:
terminator = self.terminator
if not terminator:
handler = self._lookup_none_terminator
elif isinstance(terminator, (int, long)):
handler = self._lookup_int_terminator
elif isinstance(terminator, str):
handler = self._lookup_str_terminator
else:
handler = self._lookup_list_terminator
res = handler(self.terminator)
if res is None:
break
def _lookup_none_terminator(self, terminator):
self.pull(self._input_buffer)
self._input_buffer = ''
return False
def _lookup_int_terminator(self, terminator):
if len(self._input_buffer) < terminator:
self.pull(self._input_buffer)
self._input_buffer = ''
return False
else:
self.pull(self._input_buffer[:terminator])
self._input_buffer = self._input_buffer[terminator:]
self.found_terminator()
return True
def _lookup_list_terminator(self, terminator):
for item in terminator:
if self._input_buffer.find(item) != -1:
return self._lookup_str_terminator(item)
return self._lookup_none_terminator(terminator)
def _lookup_str_terminator(self, terminator):
# 3 cases:
# 1) end of buffer matches terminator exactly:
# collect data, transition
# 2) end of buffer matches some prefix:
# collect data to the prefix
# 3) end of buffer does not match any prefix:
# collect data
terminator_len = len(terminator)
index = self._input_buffer.find(terminator)
if index != -1:
# we found the terminator
if self.strip_terminator and index > 0:
self.pull(self._input_buffer[:index])
elif not self.strip_terminator:
self.pull(self._input_buffer[:index+terminator_len])
self._input_buffer = self._input_buffer[index+terminator_len:]
# This does the Right Thing if the terminator is changed here.
self.found_terminator()
return True
else:
# check for a prefix of the terminator
index = find_prefix_at_end(self._input_buffer, terminator)
if index:
if index != len(self._input_buffer):
# we found a prefix, collect up to the prefix
self.pull(self._input_buffer[:-index])
self._input_buffer = self._input_buffer[-index:]
return None
else:
# no prefix, collect it all
self.pull(self._input_buffer)
self._input_buffer = ''
return False
    def handle_write(self):
        """asyncore write event: drain the outgoing queue."""
        self.flush()
def push(self, data):
"""
Pushes data on to the channel's fifo to ensure its transmission.
This is all you need to do to have the channel write the data out to
the network.
"""
sabs = self.send_buffer_size
if len(data) > sabs:
for i in range(0, len(data), sabs):
self.outbox.append(data[i:i+sabs])
else:
self.outbox.append(data)
return self.flush()
    def push_with_producer(self, producer):
        """Queues a producer object and triggers a flush.

        The producer goes through the same outgoing queue machinery as
        plain data chunks.
        """
        self.outbox.append(producer)
        return self.flush()
    def readable(self):
        """Predicate for inclusion in the readable for select()"""
        # The channel is always interested in incoming data.
        return True
    def writable(self):
        """Predicate for inclusion in the writable for select()"""
        # For nonblocking sockets connect() will not set self.connected flag,
        # due to EINPROGRESS socket error which is actually promise for
        # successful connection.
        return bool(self.outbox or not self.connected)
    def close_when_done(self):
        """Automatically close this channel once the outgoing queue is empty."""
        # None acts as a sentinel: _send_chunky() closes the channel when it
        # pops it off the queue.
        self.outbox.append(None)
    def flush(self):
        """Sends all data from outgoing queue."""
        # Stops early if the connection drops while sending.
        while self.outbox and self.connected:
            self._send_chunky(self.outbox.popleft())
def _send_chunky(self, data):
"""Sends data as chunks sized by ``send_buffer_size`` value.
Returns ``True`` on success, ``False`` on error and ``None`` on closing
event.
"""
if self.use_encoding and not isinstance(data, bytes):
data = data.encode(self.encoding)
while True:
if data is None:
self.handle_close()
return
obs = self.send_buffer_size
bdata = buffer(data, 0, obs)
try:
num_sent = self.send(bdata)
except socket.error:
self.handle_error()
return False
if num_sent and num_sent < len(bdata) or obs < len(data):
data = data[num_sent:]
else:
return True
    def discard_buffers(self):
        """In emergencies this method will discard any data held in the input
        and output buffers."""
        self.discard_input_buffers()
        self.discard_output_buffers()

    def discard_input_buffers(self):
        """Drops all buffered incoming data."""
        self._input_buffer = b('')
        self.inbox.clear()

    def discard_output_buffers(self):
        """Drops all queued outgoing data."""
        self.outbox.clear()
def find_prefix_at_end(haystack, needle):
    """Return the length of the longest proper prefix of *needle* that
    *haystack* ends with, or ``0`` when there is none.

    Used by the terminator matcher to detect a terminator that was split
    across two reads.

    Fix: the previous ``while``-loop implementation returned ``-1`` for an
    empty *needle* (``len('') - 1``); an empty needle now yields ``0``.

    :param haystack: Collected data (``str`` or ``bytes``).
    :param needle: Terminator to look for (same type as *haystack*).
    :returns: Matched prefix length as :class:`int`.
    """
    for size in range(len(needle) - 1, 0, -1):
        if haystack.endswith(needle[:size]):
            return size
    return 0
| Python |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Alexander Shorin
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
from collections import Iterable
from .compat import unicode
from .constants import (
STX, ETX, ETB, CR, LF, CRLF,
FIELD_SEP, COMPONENT_SEP, RECORD_SEP, REPEAT_SEP, ENCODING
)
try:
from itertools import izip_longest
except ImportError: # Python 3
from itertools import zip_longest as izip_longest
def decode(data, encoding=ENCODING):
    """Dispatch *data* to the appropriate ASTM decoding routine.

    * data starting with the STX byte (``0x02``) is decoded as a complete
      message with checksum verification;
    * data starting with an ASCII digit is decoded as a frame with a
      leading sequence number and no checksum;
    * anything else is decoded as a single record structure.

    Note, that `data` should be bytes, not unicode string even if you know
    his `encoding`.

    :param data: ASTM data object.
    :type data: bytes
    :param encoding: Data encoding.
    :type encoding: str
    :return: List of ASTM records with unicode data.
    :rtype: list
    """
    if not isinstance(data, bytes):
        raise TypeError('bytes expected, got %r' % data)
    if data.startswith(STX):
        # Full message: \x02 ... \x03 CS \r\n
        seq, records, cs = decode_message(data, encoding)
        return records
    if data[:1].decode().isdigit():
        # Frame: leading sequence digit, no checksum.
        seq, records = decode_frame(data, encoding)
        return records
    return [decode_record(data, encoding)]
def decode_message(message, encoding):
    """Decodes complete ASTM message that is sent or received due
    communication routines. It should contains checksum that would be
    additionally verified.

    :param message: ASTM message.
    :type message: bytes
    :param encoding: Data encoding.
    :type encoding: str

    :returns: Tuple of three elements:

        * :class:`int` frame sequence number.
        * :class:`list` of records with unicode data.
        * :class:`str` checksum (the trailing checksum bytes, decoded).

    :raises:
        * :exc:`ValueError` if ASTM message is malformed.
        * :exc:`AssertionError` if checksum verification fails.
          (Note: ``assert`` statements are skipped under ``python -O``.)
    """
    if not isinstance(message, bytes):
        raise TypeError('bytes expected, got %r' % message)
    if not (message.startswith(STX) and message.endswith(CRLF)):
        raise ValueError('Malformed ASTM message. Expected that it will started'
                         ' with %x and followed by %x%x characters. Got: %r'
                         ' ' % (ord(STX), ord(CR), ord(LF), message))
    # Envelope layout: <STX> frame <CS1><CS2> <CR><LF>
    stx, frame_cs = message[0], message[1:-2]
    frame, cs = frame_cs[:-2], frame_cs[-2:]
    ccs = make_checksum(frame)
    assert cs == ccs, 'Checksum failure: expected %r, calculated %r' % (cs, ccs)
    seq, records = decode_frame(frame, encoding)
    return seq, records, cs.decode()
def decode_frame(frame, encoding):
    """Decodes ASTM frame: a sequence digit followed by a list of records.

    :returns: ``(seq, records)`` tuple with the :class:`int` sequence
              number and the decoded record list.
    :raises: :exc:`TypeError` for non-bytes input; :exc:`ValueError` for a
             missing trailer or a missing sequence digit.
    """
    if not isinstance(frame, bytes):
        raise TypeError('bytes expected, got %r' % frame)
    # Strip the trailer: <CR><ETX> ends a final frame, a bare <ETB> ends an
    # intermediate (chunked) one.
    if frame.endswith(CR + ETX):
        frame = frame[:-2]
    elif frame.endswith(ETB):
        frame = frame[:-1]
    else:
        raise ValueError('Incomplete frame data %r.'
                         ' Expected trailing <CR><ETX> or <ETB> chars' % frame)
    seq = frame[:1].decode()
    if not seq.isdigit():
        raise ValueError('Malformed ASTM frame. Expected leading seq number %r'
                         '' % frame)
    records = [decode_record(chunk, encoding)
               for chunk in frame[1:].split(RECORD_SEP)]
    return int(seq), records
def decode_record(record, encoding):
    """Decodes a raw ASTM record into a list of field values.

    Fields containing repeat or component separators are decoded into
    nested lists; empty fields become ``None``.
    """
    fields = []
    for raw in record.split(FIELD_SEP):
        if REPEAT_SEP in raw:
            value = decode_repeated_component(raw, encoding)
        elif COMPONENT_SEP in raw:
            value = decode_component(raw, encoding)
        else:
            value = raw.decode(encoding)
        # Normalize empty values ('' or []) to None.
        fields.append(value if value else None)
    return fields
def decode_component(field, encoding):
    """Decodes an ASTM field into its component list.

    Empty components become ``None``; the rest are decoded with *encoding*.
    """
    components = []
    for raw in field.split(COMPONENT_SEP):
        components.append(raw.decode(encoding) if raw else None)
    return components
def decode_repeated_component(component, encoding):
    """Decodes ASTM field repeated component.

    Each repetition is itself decoded as a component list.
    """
    return [decode_component(item, encoding)
            for item in component.split(REPEAT_SEP)]
def encode(records, encoding=ENCODING, size=None, seq=1):
    """Encodes list of records into single ASTM message, also called as "packed"
    message.

    If you need to get each record as standalone message use :func:`iter_encode`
    instead.

    If the result message is too large (greater than specified `size` if it's
    not :const:`None`), than it will be split by chunks.

    :param records: List of ASTM records.
    :type records: list
    :param encoding: Data encoding.
    :type encoding: str
    :param size: Chunk size in bytes.
    :type size: int
    :param seq: Frame start sequence number.
    :type seq: int
    :return: List of ASTM message chunks (always a list, even when the
             message fits into a single chunk).
    :rtype: list
    """
    msg = encode_message(seq, records, encoding)
    if size is not None and len(msg) > size:
        return list(split(msg, size))
    return [msg]
def iter_encode(records, encoding=ENCODING, size=None, seq=1):
    """Encodes and emits each record as separate message.

    If the result message is too large (greater than specified `size` if it's
    not :const:`None`), than it will be split by chunks.

    :yields: ASTM message chunks.
    :rtype: str
    """
    for record in records:
        # Each record is encoded with the current `seq`; the counter then
        # advances once per emitted chunk so frame numbers stay continuous
        # across split messages.
        msg = encode_message(seq, [record], encoding)
        if size is not None and len(msg) > size:
            for chunk in split(msg, size):
                seq += 1
                yield chunk
        else:
            seq += 1
            yield msg
def encode_message(seq, records, encoding):
    """Encodes ASTM message.

    :param seq: Frame sequence number (wrapped modulo 8 to one digit).
    :type seq: int
    :param records: List of ASTM records.
    :type records: list
    :param encoding: Data encoding.
    :type encoding: str

    :return: ASTM complete message with checksum and other control characters.
    :rtype: bytes
    """
    data = RECORD_SEP.join(encode_record(record, encoding)
                           for record in records)
    # Frame layout: seq digit + records + <CR><ETX>, wrapped in
    # <STX> ... checksum <CR><LF>.
    data = b''.join((str(seq % 8).encode(), data, CR, ETX))
    return b''.join([STX, data, make_checksum(data), CR, LF])
def encode_record(record, encoding):
    """Encodes single ASTM record.

    :param record: ASTM record. Each :class:`str`-typed item counted as field
                   value, one level nested :class:`list` counted as components
                   and second leveled - as repeated components.
    :type record: list

    :param encoding: Data encoding.
    :type encoding: str

    :returns: Encoded ASTM record.
    :rtype: str
    """
    fields = []
    for field in record:
        if isinstance(field, bytes):
            encoded = field
        elif isinstance(field, unicode):
            encoded = field.encode(encoding)
        elif isinstance(field, Iterable):
            # Nested structure: delegate to the component encoder.
            encoded = encode_component(field, encoding)
        elif field is None:
            encoded = b''
        else:
            # Anything else (numbers, dates, ...) goes through text coercion.
            encoded = unicode(field).encode(encoding)
        fields.append(encoded)
    return FIELD_SEP.join(fields)
def encode_component(component, encoding):
    """Encodes ASTM record field components.

    Bytes pass through unchanged, text is encoded with *encoding*,
    ``None`` becomes an empty value, and a nested iterable turns the whole
    field into a repeated component. Trailing empty components are
    stripped from the result.
    """
    items = []
    _append = items.append
    for item in component:
        if isinstance(item, bytes):
            _append(item)
        elif isinstance(item, unicode):
            _append(item.encode(encoding))
        elif isinstance(item, Iterable):
            # A nested iterable means this is really a repeated component;
            # restart encoding of the whole value in repeated mode.
            return encode_repeated_component(component, encoding)
        elif item is None:
            _append(b'')
        else:
            _append(unicode(item).encode(encoding))
    return COMPONENT_SEP.join(items).rstrip(COMPONENT_SEP)
def encode_repeated_component(components, encoding):
    """Encodes repeated components.

    Each repetition is encoded as a component list and the results are
    joined with the repeat separator.
    """
    return REPEAT_SEP.join(encode_component(item, encoding)
                           for item in components)
def make_checksum(message):
    """Calculates checksum for specified message.

    :param message: ASTM message (``bytes``, or a text/Python 2 string
                    whose characters are summed by ordinal).
    :returns: Checksum value that is actually byte sized integer in hex base
    :rtype: bytes
    """
    # On Python 3 iterating bytes yields ints directly; otherwise convert
    # each character to its ordinal first.
    if isinstance(message[0], int):
        total = sum(message)
    else:
        total = sum(ord(char) for char in message)
    return ('%02X' % (total & 0xFF)).encode()
def make_chunks(s, n):
    """Splits byte string *s* into a list of chunks of at most *n* bytes.

    The final chunk may be shorter than *n*; an empty input yields an
    empty list.
    """
    return [s[offset:offset + n] for offset in range(0, len(s), n)]
def split(msg, size):
    """Split `msg` into chunks with specified `size`.

    Chunk `size` value couldn't be less then 7 since each chunk goes with at
    least 7 special characters: STX, frame number, ETX or ETB, checksum and
    message terminator.

    :param msg: ASTM message.
    :type msg: bytes
    :param size: Chunk size in bytes.
    :type size: int
    :yield: `bytes`
    """
    # Peel the envelope apart:
    # <STX><frame digit> payload <CR><ETX><CS1><CS2><CR><LF>
    stx, frame, msg, tail = msg[:1], msg[1:2], msg[2:-6], msg[-6:]
    assert stx == STX
    assert frame.isdigit()
    assert tail.endswith(CRLF)
    assert size is not None and size >= 7
    frame = int(frame)
    # 7 bytes of every chunk are control characters; the rest is payload.
    chunks = make_chunks(msg, size - 7)
    chunks, last = chunks[:-1], chunks[-1]
    idx = 0
    # All chunks but the last end with <ETB>; frame numbers run
    # sequentially modulo 8.
    for idx, chunk in enumerate(chunks):
        item = b''.join([str((idx + frame) % 8).encode(), chunk, ETB])
        yield b''.join([STX, item, make_checksum(item), CRLF])
    # NOTE(review): when the payload fits a single chunk, `idx` stays 0 and
    # the final frame is numbered frame+1 — confirm this is intended.
    item = b''.join([str((idx + frame + 1) % 8).encode(), last, CR, ETX])
    yield b''.join([STX, item, make_checksum(item), CRLF])
def join(chunks):
    """Merges ASTM message `chunks` into single message.

    The per-chunk envelopes (``STX`` + frame digit in front, checksum and
    terminator behind) are stripped, the payloads concatenated and
    re-wrapped as one message whose frame number is always ``1``.

    :param chunks: List of chunks as `bytes`.
    :type chunks: iterable
    """
    msg = b'1' + b''.join(c[2:-5] for c in chunks) + ETX
    return b''.join([STX, msg, make_checksum(msg), CRLF])
def is_chunked_message(message):
    """Checks plain message for chunked byte.

    A chunked (intermediate) message carries <ETB> exactly 5 bytes before
    its end, in place of the final <CR><ETX> pair.
    """
    if len(message) < 5:
        return False
    position = message.find(ETB)
    return position != -1 and position == len(message) - 5
| Python |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Alexander Shorin
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
import logging
import socket
from .asynclib import Dispatcher, loop
from .codec import decode_message, is_chunked_message, join
from .constants import ACK, CRLF, EOT, NAK, ENCODING
from .exceptions import InvalidState, NotAccepted
from .protocol import ASTMProtocol
log = logging.getLogger(__name__)
__all__ = ['BaseRecordsDispatcher', 'RequestHandler', 'Server']
class BaseRecordsDispatcher(object):
    """Abstract dispatcher of received ASTM records by :class:`RequestHandler`.

    You need to override his handlers or extend dispatcher for your needs.
    For instance::

        class Dispatcher(BaseRecordsDispatcher):

            def __init__(self, encoding=None):
                super(Dispatcher, self).__init__(encoding)
                # extend it for your needs
                self.dispatch['M'] = self.my_handler
                # map custom wrappers for ASTM records to their type if you
                # don't like to work with raw data.
                self.wrappers['M'] = MyWrapper

            def on_header(self, record):
                # initialize state for this session
                ...

            def on_patient(self, record):
                # handle patient info
                ...

            # etc handlers

            def my_handler(self, record):
                # handle custom record that wasn't implemented yet by
                # python-astm due to some reasons
                ...

    After defining our dispatcher, we left only to let :class:`Server` use it::

        server = Server(dispatcher=Dispatcher)
    """

    #: Encoding of received messages.
    encoding = ENCODING

    def __init__(self, encoding=None):
        self.encoding = encoding or self.encoding
        # Maps record type codes to their handler methods.
        self.dispatch = {
            'H': self.on_header,
            'C': self.on_comment,
            'P': self.on_patient,
            'O': self.on_order,
            'R': self.on_result,
            'S': self.on_scientific,
            'M': self.on_manufacturer_info,
            'L': self.on_terminator
        }
        # Maps record type codes to optional record wrapper classes
        # (see the docstring example above).
        self.wrappers = {}

    def __call__(self, message):
        """Decodes `message` and routes each record to its type handler."""
        seq, records, cs = decode_message(message, self.encoding)
        for record in records:
            self.dispatch.get(record[0], self.on_unknown)(self.wrap(record))

    def wrap(self, record):
        """Wraps raw `record` with a registered wrapper class, if any."""
        rtype = record[0]
        if rtype in self.wrappers:
            return self.wrappers[rtype](*record)
        return record

    def _default_handler(self, record):
        # `Logger.warn` is a deprecated alias; use `warning`.
        log.warning('Record remains unprocessed: %s', record)

    def on_header(self, record):
        """Header record handler."""
        self._default_handler(record)

    def on_comment(self, record):
        """Comment record handler."""
        self._default_handler(record)

    def on_patient(self, record):
        """Patient record handler."""
        self._default_handler(record)

    def on_order(self, record):
        """Order record handler."""
        self._default_handler(record)

    def on_result(self, record):
        """Result record handler."""
        self._default_handler(record)

    def on_scientific(self, record):
        """Scientific record handler."""
        self._default_handler(record)

    def on_manufacturer_info(self, record):
        """Manufacturer information record handler."""
        self._default_handler(record)

    def on_terminator(self, record):
        """Terminator record handler."""
        self._default_handler(record)

    def on_unknown(self, record):
        """Fallback handler for dispatcher."""
        self._default_handler(record)
class RequestHandler(ASTMProtocol):
    """ASTM protocol request handler.

    :param sock: Socket object.

    :param dispatcher: Request handler records dispatcher instance.
    :type dispatcher: :class:`BaseRecordsDispatcher`

    :param timeout: Number of seconds to wait for incoming data before
                    connection closing.
    :type timeout: int
    """
    def __init__(self, sock, dispatcher, timeout=None):
        super(RequestHandler, self).__init__(sock, timeout=timeout)
        # Accumulates pieces of a chunked message until the final chunk.
        self._chunks = []
        host, port = sock.getpeername() if sock is not None else (None, None)
        self.client_info = {'host': host, 'port': port}
        self.dispatcher = dispatcher
        # True while a transfer phase (ENQ .. EOT) is open.
        self._is_transfer_state = False
        # Outside a transfer only single control bytes are expected.
        self.terminator = 1

    def on_enq(self):
        """Opens a transfer phase, or rejects a duplicate ENQ with NAK."""
        if not self._is_transfer_state:
            self._is_transfer_state = True
            # Within a transfer, messages end with <CR><LF> or a bare <EOT>.
            self.terminator = [CRLF, EOT]
            return ACK
        else:
            log.error('ENQ is not expected')
            return NAK

    def on_ack(self):
        """Server side never expects ACK; raises :exc:`NotAccepted`."""
        raise NotAccepted('Server should not be ACKed.')

    def on_nak(self):
        """Server side never expects NAK; raises :exc:`NotAccepted`."""
        raise NotAccepted('Server should not be NAKed.')

    def on_eot(self):
        """Closes the transfer phase and restores single-byte terminator."""
        if self._is_transfer_state:
            self._is_transfer_state = False
            self.terminator = 1
        else:
            raise InvalidState('Server is not ready to accept EOT message.')

    def on_message(self):
        """Handles an incoming ASTM message, replying with ACK or NAK."""
        if not self._is_transfer_state:
            # Message outside of an open transfer: drop it and refuse.
            self.discard_input_buffers()
            return NAK
        else:
            try:
                self.handle_message(self._last_recv_data)
                return ACK
            except Exception:
                log.exception('Error occurred on message handling.')
                return NAK

    def handle_message(self, message):
        """Feeds `message` to the dispatcher, joining chunked transfers."""
        self.is_chunked_transfer = is_chunked_message(message)
        if self.is_chunked_transfer:
            # Intermediate chunk: keep collecting.
            self._chunks.append(message)
        elif self._chunks:
            # Final chunk of a chunked transfer: merge and dispatch as one.
            self._chunks.append(message)
            self.dispatcher(join(self._chunks))
            self._chunks = []
        else:
            self.dispatcher(message)

    def discard_input_buffers(self):
        # Also drop any partially collected chunked message.
        self._chunks = []
        return super(RequestHandler, self).discard_input_buffers()

    def on_timeout(self):
        """Closes connection on timeout."""
        super(RequestHandler, self).on_timeout()
        self.close()
class Server(Dispatcher):
    """Asyncore driven ASTM server.

    :param host: Server IP address or hostname.
    :type host: str

    :param port: Server port number.
    :type port: int

    :param request: Custom server request handler. If omitted the
                    :class:`RequestHandler` will be used by default.

    :param dispatcher: Custom request handler records dispatcher. If omitted the
                       :class:`BaseRecordsDispatcher` will be used by default.

    :param timeout: :class:`RequestHandler` connection timeout. If :const:`None`
                    request handler will wait for data before connection
                    closing.
    :type timeout: int

    :param encoding: :class:`Dispatcher <BaseRecordsDispatcher>`\'s encoding.
    :type encoding: str
    """

    #: Request handler class (instantiated per accepted connection).
    request = RequestHandler
    #: Records dispatcher class (instantiated per accepted connection).
    dispatcher = BaseRecordsDispatcher

    def __init__(self, host='localhost', port=15200,
                 request=None, dispatcher=None,
                 timeout=None, encoding=None):
        super(Server, self).__init__()
        self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
        self.set_reuse_addr()
        self.bind((host, port))
        self.listen(5)
        self.pool = []
        self.timeout = timeout
        self.encoding = encoding
        # Allow per-instance overrides of the class-level defaults.
        if request is not None:
            self.request = request
        if dispatcher is not None:
            self.dispatcher = dispatcher

    def handle_accept(self):
        """Accepts a client connection and binds a request handler to it."""
        pair = self.accept()
        if pair is None:
            return
        sock, addr = pair
        # The handler registers itself with the asyncore loop on creation.
        self.request(sock, self.dispatcher(self.encoding), timeout=self.timeout)
        super(Server, self).handle_accept()

    def serve_forever(self, *args, **kwargs):
        """Enters into the :func:`polling loop <asynclib.loop>` to let server
        handle incoming requests."""
        loop(*args, **kwargs)
| Python |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Alexander Shorin
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
import logging
from .asynclib import AsyncChat, call_later
from .records import HeaderRecord, TerminatorRecord
from .constants import STX, ENQ, ACK, NAK, EOT, ENCODING
log = logging.getLogger(__name__)
__all__ = ['ASTMProtocol']
class ASTMProtocol(AsyncChat):
    """Common ASTM protocol routines shared by :class:`Client` and the
    server-side request handler."""

    #: ASTM header record class.
    astm_header = HeaderRecord
    #: ASTM terminator record class.
    astm_terminator = TerminatorRecord
    #: Flag about chunked transfer.
    is_chunked_transfer = None
    #: IO timer
    timer = None
    #: Outgoing data encoding.
    encoding = ENCODING
    #: Whether collected data has its terminator bytes stripped.
    strip_terminator = False
    #: Most recently dispatched incoming data.
    _last_recv_data = None
    #: Most recently pushed outgoing data.
    _last_sent_data = None

    def __init__(self, sock=None, map=None, timeout=None):
        super(ASTMProtocol, self).__init__(sock, map)
        if timeout is not None:
            self.timer = call_later(timeout, self.on_timeout)

    def found_terminator(self):
        """Drains the inbox, dispatching each non-empty item."""
        while self.inbox:
            data = self.inbox.popleft()
            if not data:
                continue
            self.dispatch(data)

    def dispatch(self, data):
        """Dispatcher of received data."""
        self._last_recv_data = data
        if data == ENQ:
            handler = self.on_enq
        elif data == ACK:
            handler = self.on_ack
        elif data == NAK:
            handler = self.on_nak
        elif data == EOT:
            handler = self.on_eot
        elif data.startswith(STX):  # this looks like a message
            handler = self.on_message
        else:
            handler = lambda: self.default_handler(data)
        resp = handler()
        # Handlers may return a control byte (ACK/NAK/...) to send back.
        if resp is not None:
            self.push(resp)

    def default_handler(self, data):
        # Fix: the message was previously built with logging-style args
        # (``ValueError(msg, data)``) and therefore never formatted.
        raise ValueError('Unable to dispatch data: %r' % data)

    def push(self, data):
        self._last_sent_data = data
        # Any outgoing activity counts as progress: restart the IO timer.
        if self.timer is not None and not self.timer.cancelled:
            self.timer.reset()
        return super(ASTMProtocol, self).push(data)

    def on_enq(self):
        """Calls on <ENQ> message receiving."""

    def on_ack(self):
        """Calls on <ACK> message receiving."""

    def on_nak(self):
        """Calls on <NAK> message receiving."""

    def on_eot(self):
        """Calls on <EOT> message receiving."""

    def on_message(self):
        """Calls on ASTM message receiving."""

    def on_timeout(self):
        """Calls when timeout event occurs. Used to limit waiting time for
        response data."""
        log.warning('Communication timeout')

    def handle_read(self):
        # Incoming activity also counts as progress: restart the IO timer.
        if self.timer is not None and not self.timer.cancelled:
            self.timer.reset()
        super(ASTMProtocol, self).handle_read()

    def handle_close(self):
        if self.timer is not None and not self.timer.cancelled:
            self.timer.cancel()
        super(ASTMProtocol, self).handle_close()
| Python |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Alexander Shorin
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
class BaseASTMError(Exception):
    """Base class for all ASTM-related errors."""
class InvalidState(BaseASTMError):
    """Should be raised in case of invalid ASTM handler state."""
class NotAccepted(BaseASTMError):
    """Received data is not acceptable.

    Fix: previously derived from :exc:`BaseException`, which let it escape
    every generic ``except Exception`` handler (e.g. the one guarding
    message handling in the server request handler); it now shares the
    common :exc:`BaseASTMError` base like the other ASTM errors.
    """
class Rejected(BaseASTMError):
    """Should be raised after unsuccessful attempts to send data
    (receiver sends with <NAK> reply)."""
| Python |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Alexander Shorin
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
import logging
import socket
from .asynclib import loop
from .codec import encode
from .constants import ENQ, EOT
from .exceptions import NotAccepted
from .mapping import Record
from .protocol import ASTMProtocol
log = logging.getLogger(__name__)
__all__ = ['Client', 'Emitter']
class RecordsStateMachine(object):
    """Simple state machine to track emitting ASTM records in right order.

    :param mapping: Mapping of the ASTM records flow order.
                    Keys should be string and defines record type, while values
                    expected as sequence of other record types that may be used
                    after current one.
                    For example: ``{"H": ["P", "C", "L"]}`` mapping defines that
                    if previous record had ``"H"`` type, then the next one
                    should have ``"P"``, ``"C"`` or ``"L"`` type or
                    :exc:`AssertionError` will be raised. The default mapping
                    reflects common ASTM records flow rules. If this argument
                    specified as :const:`None` no rules will be applied.
    :type: dict
    """
    def __init__(self, mapping):
        self.mapping = mapping
        self.state = None

    def __call__(self, state):
        """Transitions to `state`, asserting the move is allowed.

        A ``None`` value is a no-op: the current state is preserved.
        """
        if state is None:
            return
        assert self.is_acceptable(state),\
            'invalid state %r, expected one of: %r' \
            % (state, self.mapping[self.state])
        self.state = state

    def is_acceptable(self, state):
        """Returns True if `state` may follow the current one."""
        if self.mapping is None:
            # No flow rules configured: everything is allowed.
            return True
        if state not in self.mapping:
            return False
        allowed = self.mapping[self.state]
        return '*' in allowed or state in allowed
#: Default rules for the ASTM records flow: maps the previously sent record
#: type to the record types allowed to follow it. ``None`` marks the start
#: of a session; ``'*'`` permits any record type.
DEFAULT_RECORDS_FLOW_MAP = {
    None: ['H'],
    'H': ['C', 'M', 'P', 'L'],
    'P': ['C', 'M', 'O', 'L'],
    'O': ['C', 'M', 'P', 'O', 'R', 'L'],
    'R': ['C', 'M', 'P', 'O', 'R', 'S', 'L'],
    'S': ['C', 'M', 'P', 'O', 'R', 'S', 'L'],
    'C': ['*'],
    'M': ['*'],
    'L': ['H']
}
class Emitter(object):
    """ASTM records emitter for :class:`Client`.

    Used as wrapper for user provided one to provide proper routines around for
    sending Header and Terminator records.

    :param emitter: Generator/coroutine.

    :param encoding: Data encoding.
    :type encoding: str

    :param flow_map: Records flow map. Used by :class:`RecordsStateMachine`.
    :type: dict

    :param chunk_size: Chunk size in bytes. If :const:`None`, emitter record
                       wouldn't be split into chunks.
    :type chunk_size: int

    :param bulk_mode: Sends all records for single session (starts from Header
                      and ends with Terminator records) via single message
                      instead of sending each record separately. If result
                      message is too long, it may be split by chunks if
                      `chunk_size` is not :const:`None`. Keep in mind, that
                      collecting all records for single session may take some
                      time and server may reject data by timeout reason.
    :type bulk_mode: bool
    """

    #: Records state machine controls emitting records in right order. It
    #: receives `records_flow_map` as only argument on Emitter initialization.
    state_machine = RecordsStateMachine

    def __init__(self, emitter, flow_map, encoding,
                 chunk_size=None, bulk_mode=False):
        self._emitter = emitter()
        self._is_active = False
        self.encoding = encoding
        self.records_sm = self.state_machine(flow_map)
        # flag to signal that user's emitter produces no records
        self.empty = False
        # last sent sequence number
        self.last_seq = 0
        # Chunks already encoded but not yet handed to the client.
        self.buffer = []
        self.chunk_size = chunk_size
        self.bulk_mode = bulk_mode

    def _get_record(self, value=None):
        """Polls the next record from the user's generator.

        ``None`` is sent on the very first poll since a generator cannot
        receive a value before it has started.
        """
        record = self._emitter.send(value if self._is_active else None)
        if not self._is_active:
            self._is_active = True
        if isinstance(record, Record):
            record = record.to_astm()
        try:
            # Validate the record type against the allowed flow order.
            self.records_sm(record[0])
        except Exception as err:
            self.throw(type(err), err.args)
        return record

    def _send_record(self, record):
        """Encodes `record` (or a whole session in bulk mode) and returns
        the first chunk, buffering the rest."""
        if self.bulk_mode:
            # Collect the whole session up to the Terminator ('L') record
            # and encode it as a single message.
            records = [record]
            while True:
                record = self._get_record(True)
                records.append(record)
                if record[0] == 'L':
                    break
            chunks = encode(records, self.encoding, self.chunk_size)
        else:
            self.last_seq += 1
            chunks = encode([record], self.encoding,
                            self.chunk_size, self.last_seq)
        self.buffer.extend(chunks)
        data = self.buffer.pop(0)
        # Account for the frame numbers consumed by still-buffered chunks.
        self.last_seq += len(self.buffer)
        if record[0] == 'L':
            # Terminator record: reset the sequence and schedule the EOT
            # byte that will close the transfer phase.
            self.last_seq = 0
            self.buffer.append(EOT)
        return data

    def send(self, value=None):
        """Passes `value` to the emitter. Semantically acts in same way as
        :meth:`send` for generators.

        If the emitter has any value within local `buffer` the returned value
        will be extracted from it unless `value` is :const:`False`.

        :param value: Callback value. :const:`True` indicates that previous
                      record was successfully received and accepted by server,
                      :const:`False` signs about his rejection.
        :type value: bool

        :return: Next record data to send to server.
        :rtype: bytes
        """
        if self.buffer and value:
            # A rejection (False) bypasses the buffer so the user's emitter
            # can decide how to react.
            return self.buffer.pop(0)
        record = self._get_record(value)
        return self._send_record(record)

    def throw(self, exc_type, exc_val=None, exc_tb=None):
        """Raises exception inside the emitter. Acts in same way as
        :meth:`throw` for generators.

        If the emitter had catch an exception and return any record value, it
        will be proceeded in common way.
        """
        record = self._emitter.throw(exc_type, exc_val, exc_tb)
        if record is not None:
            return self._send_record(record)

    def close(self):
        """Closes the emitter. Acts in same way as :meth:`close` for generators.
        """
        self._emitter.close()
class Client(ASTMProtocol):
    """Common ASTM client implementation.

    :param emitter: Generator function that will produce ASTM records.
    :type emitter: function

    :param host: Server IP address or hostname.
    :type host: str

    :param port: Server port number.
    :type port: int

    :param timeout: Time to wait for response from server. If response wasn't
                    received, the :meth:`on_timeout` will be called.
                    If :const:`None` this timer will be disabled.
    :type timeout: int

    :param flow_map: Records flow map. Used by :class:`RecordsStateMachine`.
    :type: dict

    :param chunk_size: Chunk size in bytes. :const:`None` value prevents
                       records chunking.
    :type chunk_size: int

    :param bulk_mode: Sends all records for single session (starts from Header
                      and ends with Terminator records) via single message
                      instead of sending each record separately. If result
                      message is too long, it may be split by chunks if
                      `chunk_size` is not :const:`None`. Keep in mind, that
                      collecting all records for single session may take some
                      time and server may reject data by timeout reason.
    :type bulk_mode: bool

    Base `emitter` is a generator that yield ASTM records one by one preserving
    their order::

        from astm.records import (
            HeaderRecord, PatientRecord, OrderRecord, TerminatorRecord
        )
        def emitter():
            assert (yield HeaderRecord()), 'header was rejected'
            ok = yield PatientRecord(name={'last': 'foo', 'first': 'bar'})
            if ok:  # you also can decide what to do in case of record rejection
                assert (yield OrderRecord())
            yield TerminatorRecord()  # we may do not care about rejection

    :class:`Client` thought :class:`RecordsStateMachine` keep track
    on this order, raising :exc:`AssertionError` if it is broken.

    When `emitter` terminates with :exc:`StopIteration` or :exc:`GeneratorExit`
    exception client connection to server closing too. You may provide endless
    `emitter` by wrapping function body with ``while True: ...`` loop polling
    data from source from time to time. Note, that server may have communication
    timeouts control and may close session after some time of inactivity, so
    be sure that you're able to send whole session (started by Header record and
    ended by Terminator one) within limited time frame (commonly 10-15 sec.).
    """

    #: Wrapper of emitter to provide session context and system logic about
    #: sending head and tail data.
    emitter_wrapper = Emitter

    def __init__(self, emitter, host='localhost', port=15200,
                 encoding=None, timeout=20, flow_map=DEFAULT_RECORDS_FLOW_MAP,
                 chunk_size=None, bulk_mode=False):
        super(Client, self).__init__(timeout=timeout)
        self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
        self.connect((host, port))
        self.emitter = self.emitter_wrapper(
            emitter,
            encoding=encoding or self.encoding,
            flow_map=flow_map,
            chunk_size=chunk_size,
            bulk_mode=bulk_mode
        )
        # Expect single control bytes (ACK/NAK/EOT) from the server.
        self.terminator = 1

    def handle_connect(self):
        """Initiates ASTM communication session."""
        super(Client, self).handle_connect()
        self._open_session()

    def handle_close(self):
        # Let the emitter release its resources before the socket goes away.
        self.emitter.close()
        super(Client, self).handle_close()

    def _open_session(self):
        # A session starts with the ENQ handshake byte.
        self.push(ENQ)

    def _close_session(self, close_connection=False):
        self.push(EOT)
        if close_connection:
            self.close_when_done()

    def run(self, timeout=1.0, *args, **kwargs):
        """Enters into the :func:`polling loop <astm.asynclib.loop>` to let
        client send outgoing requests."""
        loop(timeout, *args, **kwargs)

    def on_enq(self):
        """Raises :class:`NotAccepted` exception."""
        raise NotAccepted('Client should not receive ENQ.')

    def on_ack(self):
        """Handles ACK response from server.

        Provides callback value :const:`True` to the emitter and sends next
        message to server.
        """
        try:
            message = self.emitter.send(True)
        except StopIteration:
            # Emitter exhausted: terminate the session and the connection.
            self._close_session(True)
        else:
            self.push(message)
            if message == EOT:
                # EOT ended the previous session; open the next one.
                self._open_session()

    def on_nak(self):
        """Handles NAK response from server.

        If it was received on ENQ request, the client tries to repeat last
        request for allowed amount of attempts. For others it send callback
        value :const:`False` to the emitter."""
        if self._last_sent_data == ENQ:
            return self.push(ENQ)
        try:
            message = self.emitter.send(False)
        except StopIteration:
            self._close_session(True)
        except Exception:
            # Emitter failed on the rejection: close the session, re-raise.
            self._close_session(True)
            raise
        else:
            self.push(message)
            if message == EOT:
                self._open_session()

    def on_eot(self):
        """Raises :class:`NotAccepted` exception."""
        raise NotAccepted('Client should not receive EOT.')

    def on_message(self):
        """Raises :class:`NotAccepted` exception."""
        raise NotAccepted('Client should not receive ASTM message.')

    def on_timeout(self):
        """Sends final EOT message and closes connection after his receiving."""
        super(Client, self).on_timeout()
        self._close_session(True)
| Python |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Alexander Shorin
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
import datetime
import decimal
import inspect
import time
import warnings
from operator import itemgetter
from itertools import islice
try:
from itertools import izip_longest
except ImportError: # Python 3
from itertools import zip_longest as izip_longest
from .compat import basestring, unicode, long
def make_string(value):
    """Coerces *value* to a unicode string.

    ``bytes`` values are decoded as UTF-8; any other non-text value goes
    through the ``unicode`` constructor.
    """
    if isinstance(value, unicode):
        return value
    if isinstance(value, bytes):
        return unicode(value, 'utf-8')
    return unicode(value)
class Field(object):
    """Base mapping field class.

    Implements the descriptor protocol: reading the attribute pulls the
    stored value from the owner instance's ``_data`` dict (falling back to
    `default`), while assignment normalizes the value via
    :meth:`_set_value`.
    """
    def __init__(self, name=None, default=None, required=False, length=None):
        self.name = name
        self.default = default
        self.required = required
        self.length = length

    def __get__(self, instance, owner):
        if instance is None:
            # Attribute access on the class itself returns the descriptor.
            return self
        value = instance._data.get(self.name)
        if value is not None:
            value = self._get_value(value)
        elif self.default is not None:
            default = self.default
            if hasattr(default, '__call__'):
                # Callable defaults are evaluated lazily on each access.
                default = default()
            value = default
        return value

    def __set__(self, instance, value):
        if value is not None:
            value = self._set_value(value)
        instance._data[self.name] = value

    def _get_value(self, value):
        # Hook for subclasses: convert the stored value on read.
        return value

    def _set_value(self, value):
        # Hook for subclasses: validate/convert the value on write.
        value = make_string(value)
        if self.length is not None and len(value) > self.length:
            raise ValueError('Field %r value is too long (max %d, got %d)'
                             '' % (self.name, self.length, len(value)))
        return value
class MetaMapping(type):
    """Metaclass that collects :class:`Field` descriptors declared on a
    mapping class and its bases into an ordered ``_fields`` list of
    ``(attribute_name, field)`` pairs."""
    def __new__(mcs, name, bases, d):
        fields = []
        names = []
        def merge_fields(items):
            # New fields are appended in order; a field redefined later
            # replaces the earlier one but keeps its original position.
            for name, field in items:
                if field.name is None:
                    # Field name defaults to the attribute name it is
                    # bound to.
                    field.name = name
                if name not in names:
                    fields.append((name, field))
                    names.append(name)
                else:
                    fields[names.index(name)] = (name, field)
        # Inherited fields first, then the ones declared on this class.
        # NOTE(review): d.items() order is only insertion-ordered on
        # Python >= 3.6; Mapping.build is the reliable way to get a fixed
        # field order on older interpreters.
        for base in bases:
            if hasattr(base, '_fields'):
                merge_fields(base._fields)
        merge_fields([(k, v) for k, v in d.items() if isinstance(v, Field)])
        if '_fields' not in d:
            d['_fields'] = fields
        else:
            # An explicitly declared _fields list (see Mapping.build) is
            # merged on top of everything collected above.
            merge_fields(d['_fields'])
            d['_fields'] = fields
        return super(MetaMapping, mcs).__new__(mcs, name, bases, d)
#: Base class produced by calling the metaclass directly, so the same code
#: works on both Python 2 and Python 3 (avoids the incompatible
#: ``__metaclass__`` attribute / ``metaclass=`` keyword syntaxes).
_MappingProxy = MetaMapping('_MappingProxy', (object,), {})
class Mapping(_MappingProxy):
    """Base class for ASTM mappings: an ordered, fixed set of
    :class:`Field` attributes that also behaves like a sequence of the
    field values."""
    def __init__(self, *args, **kwargs):
        # Positional args are matched to fields in declaration order;
        # izip_longest pads the tail with None so every field gets
        # initialised (possibly to its default).
        fieldnames = map(itemgetter(0), self._fields)
        values = dict(izip_longest(fieldnames, args))
        values.update(kwargs)
        self._data = {}
        for attrname, field in self._fields:
            attrval = values.pop(attrname, None)
            if attrval is None:
                # No explicit value: read the descriptor (which yields the
                # default) and write it back so _data holds every key.
                setattr(self, attrname, getattr(self, attrname))
            else:
                setattr(self, attrname, attrval)
        if values:
            raise ValueError('Unexpected kwargs found: %r' % values)
    @classmethod
    def build(cls, *a):
        """Return a new subclass holding exactly the given fields, in the
        given order. Every field must carry an explicit name."""
        fields = []
        newcls = type('Generic' + cls.__name__, (cls,), {})
        for field in a:
            if field.name is None:
                raise ValueError('Name is required for ordered fields.')
            setattr(newcls, field.name, field)
            fields.append((field.name, field))
        newcls._fields = fields
        return newcls
    def __getitem__(self, key):
        # Positional (index/slice) access over the field values.
        return self.values()[key]
    def __setitem__(self, key, value):
        setattr(self, self._fields[key][0], value)
    def __delitem__(self, key):
        # "Deleting" resets the stored value to None; the slot remains.
        self._data[self._fields[key][0]] = None
    def __iter__(self):
        return iter(self.values())
    def __contains__(self, item):
        return item in self.values()
    def __len__(self):
        return len(self._data)
    def __eq__(self, other):
        # Positional comparison against any sized iterable (list, tuple,
        # or another mapping) of the same length.
        if len(self) != len(other):
            return False
        for key, value in zip(self.keys(), other):
            if getattr(self, key) != value:
                return False
        return True
    def __ne__(self, other):
        return not (self == other)
    def __repr__(self):
        return '%s(%s)' % (self.__class__.__name__,
                           ', '.join('%s=%r' % (key, value)
                                     for key, value in self.items()))
    def keys(self):
        """Return the field attribute names in declaration order."""
        return [key for key, field in self._fields]
    def values(self):
        """Return the decoded field values in declaration order."""
        return [getattr(self, key) for key in self.keys()]
    def items(self):
        """Return ``(name, value)`` pairs in declaration order."""
        return [(key, getattr(self, key)) for key, field in self._fields]
    def to_astm(self):
        """Return the raw stored values as a nested list ready for the
        ASTM encoder; required fields must not be None."""
        def values(obj):
            for key, field in obj._fields:
                # NOTE(review): lookup is by attribute name; this assumes
                # field.name always equals the attribute key, which holds
                # for every record defined in this package — confirm before
                # using fields with divergent names.
                value = obj._data[key]
                if isinstance(value, Mapping):
                    yield list(values(value))
                elif isinstance(value, list):
                    stack = []
                    for item in value:
                        if isinstance(item, Mapping):
                            stack.append(list(values(item)))
                        else:
                            stack.append(item)
                    yield stack
                elif value is None and field.required:
                    raise ValueError('Field %r value should not be None' % key)
                else:
                    yield value
        return list(values(self))
class Record(Mapping):
    """ASTM record mapping class: base for one complete record (line) of
    an ASTM message."""
class Component(Mapping):
    """ASTM component mapping class: base for a structured value nested
    inside a record field."""
class TextField(Field):
    """Field that accepts string values only; anything else raises
    :exc:`TypeError`."""
    def _set_value(self, value):
        if isinstance(value, basestring):
            return super(TextField, self)._set_value(value)
        raise TypeError('String value expected, got %r' % value)
class ConstantField(Field):
    """Mapping field for constant values.

    The default value is mandatory, it is the only value the field
    accepts, and assigning anything else raises :exc:`ValueError`.

    >>> class Record(Mapping):
    ...     type = ConstantField(default='S')
    >>> rec = Record()
    >>> rec.type
    'S'
    >>> rec.type = 'W'
    Traceback (most recent call last):
        ...
    ValueError: Field changing not allowed: got 'W', accepts 'S'
    """
    def __init__(self, name=None, default=None, field=Field()):
        # Constant fields are always required; length limits do not apply.
        # (The previous version also re-assigned self.required = True,
        # which the super() call above already does.)
        super(ConstantField, self).__init__(name, default, True, None)
        self.field = field
        if self.default is None:
            raise ValueError('Constant value should be defined')
    def _get_value(self, value):
        # Whatever got stored, reading always yields the constant.
        return self.default
    def _set_value(self, value):
        # Normalise through the wrapped field first (e.g. an IntegerField
        # for numeric constants), then reject anything but the constant.
        value = self.field._get_value(value)
        if self.default != value:
            raise ValueError('Field changing not allowed: got %r, accepts %r'
                             '' % (value, self.default))
        return super(ConstantField, self)._set_value(value)
class IntegerField(Field):
    """Mapping field for integer values.

    Non-integers (e.g. numeric strings from the wire) are converted via
    :func:`int`; unconvertible values raise :exc:`TypeError`.
    """
    def _get_value(self, value):
        return int(value)
    def _set_value(self, value):
        if isinstance(value, (int, long)):
            return super(IntegerField, self)._set_value(value)
        try:
            value = self._get_value(value)
        except Exception:
            raise TypeError('Integer value expected, got %r' % value)
        return super(IntegerField, self)._set_value(value)
class DecimalField(Field):
    """Mapping field for decimal values.

    Accepts ints, floats and :class:`decimal.Decimal` directly; other
    values (e.g. numeric strings from the wire) are converted via
    :class:`decimal.Decimal`, mirroring :class:`IntegerField`.
    Unconvertible values raise :exc:`TypeError`.
    """
    def _get_value(self, value):
        return decimal.Decimal(value)
    def _set_value(self, value):
        if not isinstance(value, (int, long, float, decimal.Decimal)):
            # Previously any string was rejected outright, which was
            # inconsistent with IntegerField/DateField and broke assigning
            # values decoded from ASTM messages (always strings).
            try:
                value = self._get_value(value)
            except Exception:
                raise TypeError('Decimal value expected, got %r' % value)
        return super(DecimalField, self)._set_value(value)
class DateField(Field):
    """Mapping field for storing date/time values, serialized as
    ``YYYYMMDD``."""
    format = '%Y%m%d'
    def _get_value(self, value):
        return datetime.datetime.strptime(value, self.format)
    def _set_value(self, value):
        # Strings are parsed with the field format; parse failure
        # propagates as ValueError.
        if isinstance(value, basestring):
            value = self._get_value(value)
        elif not isinstance(value, (datetime.datetime, datetime.date)):
            raise TypeError('Datetime value expected, got %r' % value)
        return value.strftime(self.format)
class TimeField(Field):
    """Mapping field for storing times, serialized as ``HHMMSS``."""
    format = '%H%M%S'
    def _get_value(self, value):
        # Non-strings pass through untouched; only string parsing happens
        # here.
        if not isinstance(value, basestring):
            return value
        try:
            value = value.split('.', 1)[0]  # strip out microseconds
            value = datetime.time(*time.strptime(value, self.format)[3:6])
        except ValueError:
            # NOTE: %r intentionally shows the microsecond-stripped value.
            raise ValueError('Value %r does not match format %s'
                             '' % (value, self.format))
        return value
    def _set_value(self, value):
        if isinstance(value, basestring):
            value = self._get_value(value)
        if isinstance(value, datetime.datetime):
            value = value.time()
        elif not isinstance(value, datetime.time):
            raise TypeError('Datetime value expected, got %r' % value)
        # Sub-second precision cannot be represented in HHMMSS.
        return value.replace(microsecond=0).strftime(self.format)
class DateTimeField(Field):
    """Mapping field for storing date/time values, serialized as
    ``YYYYMMDDHHMMSS``."""
    format = '%Y%m%d%H%M%S'
    def _get_value(self, value):
        return datetime.datetime.strptime(value, self.format)
    def _set_value(self, value):
        # Strings are parsed with the field format; parse failure
        # propagates as ValueError.
        if isinstance(value, basestring):
            value = self._get_value(value)
        elif not isinstance(value, (datetime.datetime, datetime.date)):
            raise TypeError('Datetime value expected, got %r' % value)
        return value.strftime(self.format)
class SetField(Field):
    """Mapping field restricted to a predefined set of values.

    :param values: iterable of accepted values.
    :param field: underlying field used to decode/encode each value.

    Assigning a value outside the set raises :exc:`ValueError`.
    """
    def __init__(self, name=None, default=None,
                 required=False, length=None,
                 values=None, field=Field()):
        super(SetField, self).__init__(name, default, required, length)
        self.field = field
        # Conditional expression instead of the fragile `and/or` idiom.
        self.values = set(values) if values else set()
    def _get_value(self, value):
        return self.field._get_value(value)
    def _set_value(self, value):
        # Decode first so membership is checked against canonical values.
        value = self.field._get_value(value)
        if value not in self.values:
            # Typo fix: was "Unexpectable".
            raise ValueError('Unexpected value %r' % value)
        return self.field._set_value(value)
class ComponentField(Field):
    """Mapping field for storing a single record component.

    :param mapping: the :class:`Component` subclass to wrap values in.
    """
    def __init__(self, mapping, name=None, default=None):
        self.mapping = mapping
        super(ComponentField, self).__init__(name, default or mapping())
    def _get_value(self, value):
        # dicts become keyword construction, ready instances pass through,
        # any other sequence is unpacked positionally.
        if isinstance(value, dict):
            return self.mapping(**value)
        if isinstance(value, self.mapping):
            return value
        return self.mapping(*value)
    def _set_value(self, value):
        if isinstance(value, dict):
            return self.mapping(**value)
        if isinstance(value, self.mapping):
            return value
        # A bare string is treated as a single-element component.
        if isinstance(value, basestring):
            value = [value]
        return self.mapping(*value)
class RepeatedComponentField(Field):
    """Mapping field for storing list of record components.

    Values are exposed through :class:`Proxy`, a list-like view that
    encodes items on write and decodes them on read via the wrapped
    :class:`ComponentField`.
    """
    def __init__(self, field, name=None, default=None):
        # Accepts either a ready ComponentField or a Mapping subclass that
        # gets wrapped into one.
        if isinstance(field, ComponentField):
            self.field = field
        else:
            assert isinstance(field, type) and issubclass(field, Mapping)
            self.field = ComponentField(field)
        default = default or []
        super(RepeatedComponentField, self).__init__(name, default)
    class Proxy(list):
        """List proxy over the raw stored sequence.

        Keeps a reference to the underlying list (``self.list``) so
        mutations write through to the mapping's storage; reads decode
        items through the component field.
        """
        def __init__(self, seq, field):
            list.__init__(self, seq)
            self.list = seq
            self.field = field
        def _to_list(self):
            # Decode every stored item into a plain list, used by the
            # comparison operators and count().
            return [list(self.field._get_value(item)) for item in self.list]
        def __add__(self, other):
            obj = type(self)(self.list, self.field)
            obj.extend(other)
            return obj
        def __iadd__(self, other):
            self.extend(other)
            return self
        def __mul__(self, other):
            return type(self)(self.list * other, self.field)
        def __imul__(self, other):
            self.list *= other
            return self
        def __lt__(self, other):
            return self._to_list() < other
        def __le__(self, other):
            return self._to_list() <= other
        def __eq__(self, other):
            return self._to_list() == other
        def __ne__(self, other):
            return self._to_list() != other
        def __ge__(self, other):
            return self._to_list() >= other
        def __gt__(self, other):
            return self._to_list() > other
        def __repr__(self):
            return '<ListProxy %s %r>' % (self.list, list(self))
        def __str__(self):
            return str(self.list)
        def __unicode__(self):
            return unicode(self.list)
        def __delitem__(self, index):
            del self.list[index]
        def __getitem__(self, index):
            # Reads decode through the component field.
            return self.field._get_value(self.list[index])
        def __setitem__(self, index, value):
            # Writes encode through the component field.
            self.list[index] = self.field._set_value(value)
        def __delslice__(self, i, j):
            del self.list[i:j]
        def __getslice__(self, i, j):
            return self.__class__(self.list[i:j], self.field)
        def __setslice__(self, i, j, seq):
            self.list[i:j] = [self.field._set_value(v) for v in seq]
        def __contains__(self, value):
            for item in self:
                if item == value:
                    return True
            return False
        def __iter__(self):
            # Iterate by index so each item goes through __getitem__
            # decoding.
            for index in range(len(self)):
                yield self[index]
        def __len__(self):
            return len(self.list)
        def __nonzero__(self):
            # Python 2 truthiness hook.
            return bool(self.list)
        def __reduce__(self):
            # Pickle the raw list, not the proxy.
            return self.list.__reduce__()
        def __reduce_ex__(self, *args, **kwargs):
            return self.list.__reduce_ex__(*args, **kwargs)
        def append(self, item):
            self.list.append(self.field._set_value(item))
        def count(self, value):
            return self._to_list().count(value)
        def extend(self, other):
            self.list.extend([self.field._set_value(i) for i in other])
        def index(self, value, start=None, stop=None):
            start = start or 0
            for idx, item in enumerate(islice(self, start, stop)):
                if item == value:
                    return idx + start
            else:
                raise ValueError('%r not in list' % value)
        def insert(self, index, object):
            self.list.insert(index, self.field._set_value(object))
        def remove(self, value):
            # Compare decoded items, but remove the raw stored value.
            for item in self:
                if item == value:
                    return self.list.remove(value)
            raise ValueError('Value %r not in list' % value)
        def pop(self, index=-1):
            return self.field._get_value(self.list.pop(index))
        def sort(self, cmp=None, key=None, reverse=False):
            raise NotImplementedError('In place sorting not allowed.')
    # update docstrings from list: copy each overridden method's docstring
    # from the corresponding built-in list method.
    for name, obj in inspect.getmembers(Proxy):
        if getattr(list, name, None) is None\
           or name in ['__module__', '__doc__']:
            continue
        if not inspect.isfunction(obj):
            continue
        obj.__doc__ = getattr(list, name).__doc__
    del name, obj
    def _get_value(self, value):
        return self.Proxy(value, self.field)
    def _set_value(self, value):
        return [self.field._set_value(item) for item in value]
class NotUsedField(Field):
    """Mapping field for a value that is not used. Acts as placeholder.
    Reading it always yields ``None``; an attempt to assign something to it
    raises :exc:`UserWarning` and rejects the assigned value."""
    def __init__(self, name=None):
        super(NotUsedField, self).__init__(name)
    def _get_value(self, value):
        return None
    def _set_value(self, value):
        warnings.warn('Field %r is not used, any assignments are omitted'
                      '' % self.name, UserWarning)
        return None
| Python |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Alexander Shorin
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
__version_info__ = (0, 6, 0, 'dev', 0)
__version__ = '{version}{tag}{build}'.format(
version='.'.join(map(str, __version_info__[:3])),
tag='-' + __version_info__[3] if __version_info__[3] else '',
build='.' + str(__version_info__[4]) if __version_info__[4] else ''
)
| Python |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Alexander Shorin
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
class DummyMixIn(object):
    """Test stand-in for the transport mix-in: provides the minimal
    attributes the code under test touches plus no-op I/O methods."""
    _input_buffer = ''
    addr = ('localhost', 15200)
    def flush(self):
        """No-op replacement for flushing the output queue."""
    def close(self):
        """No-op replacement for closing the connection."""
class CallLogger(object):
    """Callable wrapper recording whether the wrapped function has ever
    been invoked (exposed via the ``was_called`` attribute)."""
    def __init__(self, func):
        self.func = func
        self.was_called = False
    def __call__(self, *args, **kwargs):
        # Mark before delegating so even a raising call counts as made.
        self.was_called = True
        return self.func(*args, **kwargs)
def track_call(func):
    """Decorator wrapping *func* in a :class:`CallLogger` so tests can
    check its ``was_called`` attribute."""
    return CallLogger(func)
| Python |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Alexander Shorin
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
import os
import sys
import unittest
def suite():
    """Collect every ``test_*.py`` module below the current directory into
    a single :class:`unittest.TestSuite`, reporting per-module test counts
    on stdout."""
    collected = unittest.TestSuite()
    for root, dirs, files in os.walk('.'):
        for filename in files:
            if not (filename.startswith('test_') and filename.endswith('.py')):
                continue
            # Turn the relative path into a dotted module name.
            modname = os.path.join(root, filename.split('.')[0])
            modname = modname.replace(os.path.sep, '.').lstrip('.')
            tests = unittest.defaultTestLoader.loadTestsFromName(modname)
            for test in tests:
                collected.addTests(test)
            sys.stdout.write('%s : %s tests%s'
                             % (modname, tests.countTestCases(), os.linesep))
            sys.stdout.flush()
    return collected
# Allow running this module directly to discover and run the whole suite.
if __name__ == '__main__':
    unittest.main(defaultTest='suite')
| Python |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Alexander Shorin
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
"""Common ASTM records structure.
This module contains base ASTM records mappings with only defined common
required fields for most implementations. Others are marked as
:class:`~astm.mapping.NotUsedField` and should be defined explicitly for your
ASTM realisation.
"""
from datetime import datetime
from .mapping import (
Record, ConstantField, DateTimeField, IntegerField, NotUsedField,
TextField, RepeatedComponentField, Component
)
__all__ = ['HeaderRecord', 'PatientRecord', 'OrderRecord',
'ResultRecord', 'CommentRecord', 'TerminatorRecord']
#: +-----+--------------+---------------------------------+-------------------+
#: | # | ASTM Field # | ASTM Name | Python alias |
#: +=====+==============+=================================+===================+
#: | 1 | 7.1.1 | ASTM Record Type ID | type |
#: +-----+--------------+---------------------------------+-------------------+
#: | 2 | 7.1.2 | Delimiter Definition | delimeter |
#: +-----+--------------+---------------------------------+-------------------+
#: | 3 | 7.1.3 | Message Control ID | message_id |
#: +-----+--------------+---------------------------------+-------------------+
#: | 4 | 7.1.4 | Access Password | password |
#: +-----+--------------+---------------------------------+-------------------+
#: | 5 | 7.1.5 | Sender Name or ID | sender |
#: +-----+--------------+---------------------------------+-------------------+
#: | 6 | 7.1.6 | Sender Street Address | address |
#: +-----+--------------+---------------------------------+-------------------+
#: | 7 | 7.1.7 | Reserved Field | reserved |
#: +-----+--------------+---------------------------------+-------------------+
#: | 8 | 7.1.8 | Sender Telephone Number | phone |
#: +-----+--------------+---------------------------------+-------------------+
#: | 9 | 7.1.9 | Characteristics of Sender | caps |
#: +-----+--------------+---------------------------------+-------------------+
#: | 10 | 7.1.10 | Receiver ID | receiver |
#: +-----+--------------+---------------------------------+-------------------+
#: | 11 | 7.1.11 | Comments | comments |
#: +-----+--------------+---------------------------------+-------------------+
#: | 12 | 7.1.12 | Processing ID | processing_id |
#: +-----+--------------+---------------------------------+-------------------+
#: | 13 | 7.1.13 | Version Number | version |
#: +-----+--------------+---------------------------------+-------------------+
#: | 14 | 7.1.14 | Date/Time of Message | timestamp |
#: +-----+--------------+---------------------------------+-------------------+
#:
#: Message Header record (see the field table above). Only the record
#: type, delimiter definition, processing id and timestamp are populated;
#: the NotUsedField slots are placeholders for concrete implementations.
HeaderRecord = Record.build(
    ConstantField(name='type', default='H'),
    # Workaround to express the standard delimiter definition through the
    # generic field machinery; logically this is:
    #   ConstantField(name='delimeter', default='\^&')
    # ('delimeter' is the public alias used throughout this package —
    # see the field table above).
    RepeatedComponentField(Component.build(
        ConstantField(name='_', default=''),
        TextField(name='__')
    ), name='delimeter', default=[[], ['', '&']]),
    NotUsedField(name='message_id'),
    NotUsedField(name='password'),
    NotUsedField(name='sender'),
    NotUsedField(name='address'),
    NotUsedField(name='reserved'),
    NotUsedField(name='phone'),
    NotUsedField(name='caps'),
    NotUsedField(name='receiver'),
    NotUsedField(name='comments'),
    ConstantField(name='processing_id', default='P'),
    NotUsedField(name='version'),
    DateTimeField(name='timestamp', default=datetime.now, required=True),
)
#: +-----+--------------+---------------------------------+-------------------+
#: | # | ASTM Field # | ASTM Name | Python alias |
#: +=====+==============+=================================+===================+
#: | 1 | 8.1.1 | Record Type ID | type |
#: +-----+--------------+---------------------------------+-------------------+
#: | 2 | 8.1.2 | Sequence Number | seq |
#: +-----+--------------+---------------------------------+-------------------+
#: | 3 | 8.1.3 | Practice Assigned Patient ID | practice_id |
#: +-----+--------------+---------------------------------+-------------------+
#: | 4 | 8.1.4 | Laboratory Assigned Patient ID | laboratory_id |
#: +-----+--------------+---------------------------------+-------------------+
#: | 5 | 8.1.5 | Patient ID | id |
#: +-----+--------------+---------------------------------+-------------------+
#: | 6 | 8.1.6 | Patient Name | name |
#: +-----+--------------+---------------------------------+-------------------+
#: | 7 | 8.1.7 | Mother’s Maiden Name | maiden_name |
#: +-----+--------------+---------------------------------+-------------------+
#: | 8 | 8.1.8 | Birthdate | birthdate |
#: +-----+--------------+---------------------------------+-------------------+
#: | 9 | 8.1.9 | Patient Sex | sex |
#: +-----+--------------+---------------------------------+-------------------+
#: | 10 | 8.1.10 | Patient Race-Ethnic Origin | race |
#: +-----+--------------+---------------------------------+-------------------+
#: | 11 | 8.1.11 | Patient Address | address |
#: +-----+--------------+---------------------------------+-------------------+
#: | 12 | 8.1.12 | Reserved Field | reserved |
#: +-----+--------------+---------------------------------+-------------------+
#: | 13 | 8.1.13 | Patient Telephone Number | phone |
#: +-----+--------------+---------------------------------+-------------------+
#: | 14 | 8.1.14 | Attending Physician ID | physician_id |
#: +-----+--------------+---------------------------------+-------------------+
#: | 15 | 8.1.15 | Special Field #1 | special_1 |
#: +-----+--------------+---------------------------------+-------------------+
#: | 16 | 8.1.16 | Special Field #2 | special_2 |
#: +-----+--------------+---------------------------------+-------------------+
#: | 17 | 8.1.17 | Patient Height | height |
#: +-----+--------------+---------------------------------+-------------------+
#: | 18 | 8.1.18 | Patient Weight | weight |
#: +-----+--------------+---------------------------------+-------------------+
#: | 19 | 8.1.19 | Patient’s Known Diagnosis | diagnosis |
#: +-----+--------------+---------------------------------+-------------------+
#: | 20 | 8.1.20 | Patient’s Active Medication | medication |
#: +-----+--------------+---------------------------------+-------------------+
#: | 21 | 8.1.21 | Patient’s Diet | diet |
#: +-----+--------------+---------------------------------+-------------------+
#: | 22 | 8.1.22 | Practice Field No. 1 | practice_field_1 |
#: +-----+--------------+---------------------------------+-------------------+
#: | 23 | 8.1.23 | Practice Field No. 2 | practice_field_2 |
#: +-----+--------------+---------------------------------+-------------------+
#: | 24 | 8.1.24 | Admission/Discharge Dates | admission_date |
#: +-----+--------------+---------------------------------+-------------------+
#: | 25 | 8.1.25 | Admission Status | admission_status |
#: +-----+--------------+---------------------------------+-------------------+
#: | 26 | 8.1.26 | Location | location |
#: +-----+--------------+---------------------------------+-------------------+
#:
#: Patient record skeleton: only type and sequence number are populated.
#: Fields after 'location' extend beyond the table above — presumably
#: vendor/LIS-specific extensions; confirm against the target analyzer's
#: protocol documentation.
PatientRecord = Record.build(
    ConstantField(name='type', default='P'),
    IntegerField(name='seq', default=1, required=True),
    NotUsedField(name='practice_id'),
    NotUsedField(name='laboratory_id'),
    NotUsedField(name='id'),
    NotUsedField(name='name'),
    NotUsedField(name='maiden_name'),
    NotUsedField(name='birthdate'),
    NotUsedField(name='sex'),
    NotUsedField(name='race'),
    NotUsedField(name='address'),
    NotUsedField(name='reserved'),
    NotUsedField(name='phone'),
    NotUsedField(name='physician_id'),
    NotUsedField(name='special_1'),
    NotUsedField(name='special_2'),
    NotUsedField(name='height'),
    NotUsedField(name='weight'),
    NotUsedField(name='diagnosis'),
    NotUsedField(name='medication'),
    NotUsedField(name='diet'),
    NotUsedField(name='practice_field_1'),
    NotUsedField(name='practice_field_2'),
    NotUsedField(name='admission_date'),
    NotUsedField(name='admission_status'),
    NotUsedField(name='location'),
    NotUsedField(name='diagnostic_code_nature'),
    NotUsedField(name='diagnostic_code'),
    NotUsedField(name='religion'),
    NotUsedField(name='martial_status'),
    NotUsedField(name='isolation_status'),
    NotUsedField(name='language'),
    NotUsedField(name='hospital_service'),
    NotUsedField(name='hospital_institution'),
    NotUsedField(name='dosage_category'),
)
#: +-----+--------------+--------------------------------+--------------------+
#: | # | ASTM Field # | ASTM Name | Python alias |
#: +=====+==============+================================+====================+
#: | 1 | 9.4.1 | Record Type ID | type |
#: +-----+--------------+--------------------------------+--------------------+
#: | 2 | 9.4.2 | Sequence Number | seq |
#: +-----+--------------+--------------------------------+--------------------+
#: | 3 | 9.4.3 | Specimen ID | sample_id |
#: +-----+--------------+--------------------------------+--------------------+
#: | 4 | 9.4.4 | Instrument Specimen ID | instrument |
#: +-----+--------------+--------------------------------+--------------------+
#: | 5 | 9.4.5 | Universal Test ID | test |
#: +-----+--------------+--------------------------------+--------------------+
#: | 6 | 9.4.6 | Priority | priority |
#: +-----+--------------+--------------------------------+--------------------+
#: | 7 | 9.4.7 | Requested/Ordered Date/Time | created_at |
#: +-----+--------------+--------------------------------+--------------------+
#: | 8 | 9.4.8 | Specimen Collection Date/Time | sampled_at |
#: +-----+--------------+--------------------------------+--------------------+
#: | 9 | 9.4.9 | Collection End Time | collected_at |
#: +-----+--------------+--------------------------------+--------------------+
#: | 10 | 9.4.10 | Collection Volume | volume |
#: +-----+--------------+--------------------------------+--------------------+
#: | 11 | 9.4.11 | Collector ID | collector |
#: +-----+--------------+--------------------------------+--------------------+
#: | 12 | 9.4.12 | Action Code | action_code |
#: +-----+--------------+--------------------------------+--------------------+
#: | 13 | 9.4.13 | Danger Code | danger_code |
#: +-----+--------------+--------------------------------+--------------------+
#: | 14 | 9.4.14 | Relevant Information | clinical_info |
#: +-----+--------------+--------------------------------+--------------------+
#: | 15 | 9.4.15 | Date/Time Specimen Received | delivered_at |
#: +-----+--------------+--------------------------------+--------------------+
#: | 16 | 9.4.16 | Specimen Descriptor | biomaterial |
#: +-----+--------------+--------------------------------+--------------------+
#: | 17 | 9.4.17 | Ordering Physician | physician |
#: +-----+--------------+--------------------------------+--------------------+
#: | 18 | 9.4.18 | Physician’s Telephone # | physician_phone |
#: +-----+--------------+--------------------------------+--------------------+
#: | 19 | 9.4.19 | User Field No. 1 | user_field_1 |
#: +-----+--------------+--------------------------------+--------------------+
#: | 20 | 9.4.20 | User Field No. 2 | user_field_2 |
#: +-----+--------------+--------------------------------+--------------------+
#: | 21 | 9.4.21 | Laboratory Field No. 1 | laboratory_field_1 |
#: +-----+--------------+--------------------------------+--------------------+
#: | 22 | 9.4.22 | Laboratory Field No. 2 | laboratory_field_2 |
#: +-----+--------------+--------------------------------+--------------------+
#: | 23 | 9.4.23 | Date/Time Reported | modified_at |
#: +-----+--------------+--------------------------------+--------------------+
#: | 24 | 9.4.24 | Instrument Charge | instrument_charge |
#: +-----+--------------+--------------------------------+--------------------+
#: | 25 | 9.4.25 | Instrument Section ID | instrument_section |
#: +-----+--------------+--------------------------------+--------------------+
#: | 26 | 9.4.26 | Report Type | report_type |
#: +-----+--------------+--------------------------------+--------------------+
#:
#: Test Order record skeleton: only type and sequence number are
#: populated. Fields after 'report_type' extend beyond the table above —
#: presumably implementation-specific extensions; confirm against the
#: target analyzer's protocol documentation.
OrderRecord = Record.build(
    ConstantField(name='type', default='O'),
    IntegerField(name='seq', default=1, required=True),
    NotUsedField(name='sample_id'),
    NotUsedField(name='instrument'),
    NotUsedField(name='test'),
    NotUsedField(name='priority'),
    NotUsedField(name='created_at'),
    NotUsedField(name='sampled_at'),
    NotUsedField(name='collected_at'),
    NotUsedField(name='volume'),
    NotUsedField(name='collector'),
    NotUsedField(name='action_code'),
    NotUsedField(name='danger_code'),
    NotUsedField(name='clinical_info'),
    NotUsedField(name='delivered_at'),
    NotUsedField(name='biomaterial'),
    NotUsedField(name='physician'),
    NotUsedField(name='physician_phone'),
    NotUsedField(name='user_field_1'),
    NotUsedField(name='user_field_2'),
    NotUsedField(name='laboratory_field_1'),
    NotUsedField(name='laboratory_field_2'),
    NotUsedField(name='modified_at'),
    NotUsedField(name='instrument_charge'),
    NotUsedField(name='instrument_section'),
    NotUsedField(name='report_type'),
    NotUsedField(name='reserved'),
    NotUsedField(name='location_ward'),
    NotUsedField(name='infection_flag'),
    NotUsedField(name='specimen_service'),
    NotUsedField(name='laboratory')
)
#: +-----+--------------+--------------------------------+--------------------+
#: | # | ASTM Field # | ASTM Name | Python alias |
#: +=====+==============+================================+====================+
#: | 1 | 10.1.1 | Record Type ID | type |
#: +-----+--------------+--------------------------------+--------------------+
#: | 2 | 10.1.2 | Sequence Number | seq |
#: +-----+--------------+--------------------------------+--------------------+
#: | 3 | 10.1.3 | Universal Test ID | test |
#: +-----+--------------+--------------------------------+--------------------+
#: | 4 | 10.1.4 | Data or Measurement Value | value |
#: +-----+--------------+--------------------------------+--------------------+
#: | 5 | 10.1.5 | Units | units |
#: +-----+--------------+--------------------------------+--------------------+
#: | 6 | 10.1.6 | Reference Ranges | references |
#: +-----+--------------+--------------------------------+--------------------+
#: | 7 | 10.1.7 | Result Abnormal Flags | abnormal_flag |
#: +-----+--------------+--------------------------------+--------------------+
#: | 8 | 10.1.8 | Nature of Abnormal Testing | abnormality_nature |
#: +-----+--------------+--------------------------------+--------------------+
#: | 9 | 10.1.9 | Results Status | status |
#: +-----+--------------+--------------------------------+--------------------+
#: | 10 | 10.1.10 | Date of Change in Instrument | norms_changed_at |
#: | | | Normative Values | |
#: +-----+--------------+--------------------------------+--------------------+
#: | 11 | 10.1.11 | Operator Identification | operator |
#: +-----+--------------+--------------------------------+--------------------+
#: | 12 | 10.1.12 | Date/Time Test Started | started_at |
#: +-----+--------------+--------------------------------+--------------------+
#: | 13 | 10.1.13 | Date/Time Test Complete | completed_at |
#: +-----+--------------+--------------------------------+--------------------+
#: | 14 | 10.1.14 | Instrument Identification | instrument |
#: +-----+--------------+--------------------------------+--------------------+
#:
#: Result record skeleton: only type and sequence number are populated;
#: see the field table above for the meaning of each placeholder.
ResultRecord = Record.build(
    ConstantField(name='type', default='R'),
    IntegerField(name='seq', default=1, required=True),
    NotUsedField(name='test'),
    NotUsedField(name='value'),
    NotUsedField(name='units'),
    NotUsedField(name='references'),
    NotUsedField(name='abnormal_flag'),
    NotUsedField(name='abnormality_nature'),
    NotUsedField(name='status'),
    NotUsedField(name='norms_changed_at'),
    NotUsedField(name='operator'),
    NotUsedField(name='started_at'),
    NotUsedField(name='completed_at'),
    NotUsedField(name='instrument'),
)
#: +-----+--------------+---------------------------------+-------------------+
#: | # | ASTM Field # | ASTM Name | Python alias |
#: +=====+==============+=================================+===================+
#: | 1 | 11.1.1 | Record Type ID | type |
#: +-----+--------------+---------------------------------+-------------------+
#: | 2 | 11.1.2 | Sequence Number | seq |
#: +-----+--------------+---------------------------------+-------------------+
#: | 3 | 11.1.3 | Comment Source | source |
#: +-----+--------------+---------------------------------+-------------------+
#: | 4 | 11.1.4 | Comment Text | data |
#: +-----+--------------+---------------------------------+-------------------+
#: | 5 | 11.1.5 | Comment Type | ctype |
#: +-----+--------------+---------------------------------+-------------------+
#:
#: Comment record skeleton: only type and sequence number are populated;
#: see the field table above for the meaning of each placeholder.
CommentRecord = Record.build(
    ConstantField(name='type', default='C'),
    IntegerField(name='seq', default=1, required=True),
    NotUsedField(name='source'),
    NotUsedField(name='data'),
    NotUsedField(name='ctype')
)
#: +-----+--------------+---------------------------------+-------------------+
#: | # | ASTM Field # | ASTM Name | Python alias |
#: +=====+==============+=================================+===================+
#: | 1 | 13.1.1 | Record Type ID | type |
#: +-----+--------------+---------------------------------+-------------------+
#: | 2 | 13.1.2 | Sequence Number | seq |
#: +-----+--------------+---------------------------------+-------------------+
#: | 3 | 13.1.3 | Termination code | code |
#: +-----+--------------+---------------------------------+-------------------+
#:
#: Message Terminator record: all three fields are constants — the
#: sequence number is always 1 and the termination code defaults to 'N'.
TerminatorRecord = Record.build(
    ConstantField(name='type', default='L'),
    ConstantField(name='seq', default=1, field=IntegerField()),
    ConstantField(name='code', default='N')
)
#: +-----+--------------+---------------------------------+-------------------+
#: | # | ASTM Field # | ASTM Name | Python alias |
#: +=====+==============+=================================+===================+
#: | 1 | 14.1.1 | Record Type ID | type |
#: +-----+--------------+---------------------------------+-------------------+
#: | 2 | 14.1.2 | Sequence Number | seq |
#: +-----+--------------+---------------------------------+-------------------+
#: | 3 | 14.1.3 | Analytical Method | method |
#: +-----+--------------+---------------------------------+-------------------+
#: | 4 | 14.1.4 | Instrumentation | instrument |
#: +-----+--------------+---------------------------------+-------------------+
#: | 5 | 14.1.5 | Reagents | reagents |
#: +-----+--------------+---------------------------------+-------------------+
#: | 6 | 14.1.6 | Units of Measure | units |
#: +-----+--------------+---------------------------------+-------------------+
#: | 7 | 14.1.7 | Quality Control | qc |
#: +-----+--------------+---------------------------------+-------------------+
#: | 8 | 14.1.8 | Specimen Descriptor | biomaterial |
#: +-----+--------------+---------------------------------+-------------------+
#: | 9 | 14.1.9 | Reserved Field | reserved |
#: +-----+--------------+---------------------------------+-------------------+
#: | 10 | 14.1.10 | Container | container |
#: +-----+--------------+---------------------------------+-------------------+
#: | 11 | 14.1.11 | Specimen ID | sample_id |
#: +-----+--------------+---------------------------------+-------------------+
#: | 12 | 14.1.12 | Analyte | analyte |
#: +-----+--------------+---------------------------------+-------------------+
#: | 13 | 14.1.13 | Result | result |
#: +-----+--------------+---------------------------------+-------------------+
#: | 14 | 14.1.14 | Result Units | result_units |
#: +-----+--------------+---------------------------------+-------------------+
#: | 15 | 14.1.15 | Collection Date and Time | sampled_at |
#: +-----+--------------+---------------------------------+-------------------+
#: | 16 | 14.1.16 | Result Date and Time | completed_at |
#: +-----+--------------+---------------------------------+-------------------+
#: | 17 | 14.1.17 | Analytical Preprocessing Steps | preanalytics |
#: +-----+--------------+---------------------------------+-------------------+
#: | 18 | 14.1.18 | Patient Diagnosis | diagnosis |
#: +-----+--------------+---------------------------------+-------------------+
#: | 19 | 14.1.19 | Patient Birthdate | birthdate |
#: +-----+--------------+---------------------------------+-------------------+
#: | 20 | 14.1.20 | Patient Sex | sex |
#: +-----+--------------+---------------------------------+-------------------+
#: | 21 | 14.1.21 | Patient Race | race |
#: +-----+--------------+---------------------------------+-------------------+
#:
#: Scientific Record (``S``).  Only ``type`` and ``seq`` carry data; every
#: other ASTM field from the table above is declared unused here.
ScientificRecord = Record.build(
    ConstantField(name='type', default='S'),
    IntegerField(name='seq', default=1, required=True),
    NotUsedField(name='method'),
    NotUsedField(name='instrument'),
    NotUsedField(name='reagents'),
    NotUsedField(name='units'),
    NotUsedField(name='qc'),
    NotUsedField(name='biomaterial'),
    NotUsedField(name='reserved'),
    NotUsedField(name='container'),
    NotUsedField(name='sample_id'),
    NotUsedField(name='analyte'),
    NotUsedField(name='result'),
    NotUsedField(name='result_units'),
    NotUsedField(name='sampled_at'),
    NotUsedField(name='completed_at'),
    NotUsedField(name='preanalytics'),
    NotUsedField(name='diagnosis'),
    NotUsedField(name='birthdate'),
    NotUsedField(name='sex'),
    NotUsedField(name='race'),
)
#: +-----+--------------+---------------------------------+-------------------+
#: | # | ASTM Field # | ASTM Name | Python alias |
#: +=====+==============+=================================+===================+
#: | 1 | 15.1.1 | Record Type ID | type |
#: +-----+--------------+---------------------------------+-------------------+
#: | 2 | 15.1.2 | Sequence Number | seq |
#: +-----+--------------+---------------------------------+-------------------+
#:
#: .. note::
#: This record, which is similar to the comment record, may be used to send
#: complex structures where use of the existing record types would not be
#: appropriate. The fields within this record type are defined by the
#: manufacturer.
#:
#: Manufacturer Information Record (``M``).  Only ``type`` and ``seq`` are
#: defined; per the note above, any further fields are vendor-specific.
ManufacturerInfoRecord = Record.build(
    ConstantField(name='type', default='M'),
    IntegerField(name='seq', default=1, required=True),
)
| Python |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Alexander Shorin
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
#: ASTM specification base encoding.
ENCODING = 'latin-1'
#: Message start token.
STX = b'\x02'
#: Message end token.
ETX = b'\x03'
#: ASTM session termination token.
EOT = b'\x04'
#: ASTM session initialization token.
ENQ = b'\x05'
#: Command accepted token.
ACK = b'\x06'
#: Command rejected token.
NAK = b'\x15'
#: Message chunk end token.
ETB = b'\x17'
#: Line feed token.
LF = b'\x0A'
#: Carriage return token.
CR = b'\x0D'
#: CR + LF shortcut.
CRLF = CR + LF
#: Message records delimiter.
RECORD_SEP = b'\x0D' # \r #
#: Record fields delimiter.
FIELD_SEP = b'\x7C' # | #
#: Delimiter for repeated fields.
REPEAT_SEP = b'\x5C' # \ #
#: Field components delimiter.
COMPONENT_SEP = b'\x5E' # ^ #
#: Escape token (the original comment read "Date escape token" — presumably a
#: typo for "Data").
ESCAPE_SEP = b'\x26' # & #
| Python |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Alexander Shorin
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
"""
``astm.omnilab.server`` - LabOnline server implementation
----------------------------------------------------------
"""
from astm.server import BaseRecordsDispatcher
from astm.mapping import (
Component, ConstantField, ComponentField, DateTimeField, IntegerField,
SetField, TextField, NotUsedField, DateField
)
from .common import (
Header, Terminator, CommonPatient as Patient,
CommonOrder,
CommonResult,
CommonComment,
Sender
)
# Public API of the LabOnline server dialect.  ``Comment`` is defined below
# and registered in RecordsDispatcher.wrappers, so it is exported here too
# (the client module's ``__all__`` already exports its own ``Comment``).
__all__ = ['RecordsDispatcher',
           'Header', 'Patient', 'Order', 'Result', 'Comment', 'Terminator',
           'CommentData', 'CompletionDate', 'Instrument', 'Operator',
           'Sender', 'Test']
#: Instrument (analyser) information structure.
#:
#: :param _: Reserved. Not used.
#: :type _: None
#:
#: :param rack: Rack number. Length: 5.
#: :type rack: str
#:
#: :param position: Position number. Length: 3.
#: :type position: str
#:
# NOTE(review): this binding is shadowed by a second ``Instrument =
# Component.build(...)`` later in this module, which declares the first
# position as ``NotUsedField`` (matching the doc comment above).  Since the
# later definition wins at import time, this one is dead code — consider
# removing one of the two.
Instrument = Component.build(
    TextField(name='_'),  # documented as reserved/not used, yet declared TextField — see note above
    TextField(name='rack', length=5),
    TextField(name='position', length=3),
)
#: Test :class:`~astm.mapping.Component` also known as Universal Test ID.
#:
#: :param _: Reserved. Not used.
#: :type _: None
#:
#: :param __: Reserved. Not used.
#: :type __: None
#:
#: :param ___: Reserved. Not used.
#: :type ___: None
#:
#: :param assay_code: Assay code. Required. Length: 20.
#: :type assay_code: str
#:
#: :param assay_name: Assay name. Length: 8.
#: :type assay_name: str
#:
#: :param dilution: Dilution. Length: 10.
#: :type dilution: str
#:
#: :param status: Assay status. Length: 1.
#: :type status: str
#:
#: :param reagent_lot: Reagent lot. Length: 15.
#: :type reagent_lot: str
#:
#: :param reagent_number: Reagent serial number. Length: 5.
#: :type reagent_number: str
#:
#: :param control_lot: Control lot number. Length: 25.
#: :type control_lot: str
#:
#: :param type: Result type value. One of: ``CE``, ``TX``.
#: :type type: str
#:
# Field order follows the ASTM component positions documented in the comment
# table above; the first three components are reserved.
Test = Component.build(
    NotUsedField(name='_'),
    NotUsedField(name='__'),
    NotUsedField(name='___'),
    TextField(name='assay_code', required=True, length=20),
    TextField(name='assay_name', length=8),
    TextField(name='dilution', length=10),
    TextField(name='status', length=1),
    TextField(name='reagent_lot', length=15),
    TextField(name='reagent_number', length=5),
    TextField(name='control_lot', length=25),
    SetField(name='type', values=('CE', 'TX'))  # result type: coded entry or free text
)
#: Information about operator that validated results.
#:
#: :param code_on_labonline: Operator code on LabOnline. Length: 12.
#: :type code_on_labonline: str
#:
#: :param code_on_analyzer: Operator code on analyser. Length: 20.
#: :type code_on_analyzer: str
#:
Operator = Component.build(
    TextField(name='code_on_labonline', length=12),  # operator code as known to LabOnline
    TextField(name='code_on_analyzer', length=20),   # operator code as known to the analyser
)
#: Completion date time information.
#:
#: :param labonline: Completion date time on LabOnline.
#: :type labonline: datetime.datetime
#:
#: :param analyzer: Completion date time on analyser.
#: :type analyzer: datetime.datetime
#:
CompletionDate = Component.build(
    DateTimeField(name='labonline'),  # completion timestamp on LabOnline
    DateTimeField(name='analyzer'),   # completion timestamp on the analyser
)
#: Instrument (analyser) information structure.
#:
#: :param _: Reserved. Not used.
#: :type _: None
#:
#: :param rack: Rack number. Length: 5.
#: :type rack: str
#:
#: :param position: Position number. Length: 3.
#: :type position: str
#:
# NOTE(review): this redefines the ``Instrument`` component declared earlier
# in this module and takes precedence at import time.  This version marks the
# first position as ``NotUsedField``, which matches the doc comment above.
Instrument = Component.build(
    NotUsedField(name='_'),
    TextField(name='rack', length=5),
    TextField(name='position', length=3),
)
#: Comment control text structure.
#:
CommentData = Component.build(
    # Comment control code; the allowed two-letter codes below are defined by
    # the LabOnline protocol (superset of the client dialect's PC/RC/SC/TC).
    SetField(name='code', values=('PC', 'RC', 'SC', 'TC',
        'CK', 'SE', 'CL', 'TA', 'SS', 'HQ', 'AL', 'PT')),
    TextField(name='value'),
    # Five free-form payload fields; semantics depend on ``code``.
    TextField(name='field_1'),
    TextField(name='field_2'),
    TextField(name='field_3'),
    TextField(name='field_4'),
    TextField(name='field_5'),
)
class Order(CommonOrder):
    """ASTM order record.
    :param type: Record Type ID. Always ``O``.
    :type type: str
    :param seq: Sequence Number. Required.
    :type seq: int
    :param sample_id: Sample ID number. Required. Length: 12.
    :type sample_id: str
    :param instrument: Instrument specimen ID.
    :type instrument: :class:`Instrument`
    :param test: Test information structure (aka Universal Test ID).
    :type test: :class:`Test`
    :param priority: Priority flag. Required. Possible values:
      - ``S``: stat; - ``R``: routine.
    :type priority: str
    :param created_at: Ordered date and time. Required.
    :type created_at: datetime.datetime
    :param sampled_at: Specimen collection date and time.
    :type sampled_at: datetime.datetime
    :param collected_at: Collection end time. Not used.
    :type collected_at: None
    :param volume: Collection volume. Not used.
    :type volume: None
    :param collector: Collector ID. Not used.
    :type collector: None
    :param action_code: Action code. Required. Possible values:
      - :const:`None`: normal order result;
      - ``Q``: quality control;
    :type action_code: str
    :param danger_code: Danger code. Not used.
    :type danger_code: None
    :param clinical_info: Relevant clinical info. Not used.
    :type clinical_info: None
    :param delivered_at: Date/time specimen received.
    :type delivered_at: None
    :param biomaterial: Sample material code. Length: 20.
    :type biomaterial: str
    :param physician: Ordering Physician. Not used.
    :type physician: None
    :param physician_phone: Physician's phone number. Not used.
    :type physician_phone: None
    :param user_field_1: An optional field, it will be sent back unchanged to
                         the host along with the result. Length: 20.
    :type user_field_1: str
    :param user_field_2: An optional field, it will be sent back unchanged to
                         the host along with the result. Length: 1024.
    :type user_field_2: str
    :param laboratory_field_1: Laboratory field #1. Not used.
    :type laboratory_field_1: None
    :param laboratory_field_2: Primary tube code. Length: 12.
    :type laboratory_field_2: str
    :param modified_at: Date and time of last result modification. Not used.
    :type modified_at: None
    :param instrument_charge: Instrument charge to computer system. Not used.
    :type instrument_charge: None
    :param instrument_section: Instrument section id. Not used.
    :type instrument_section: None
    :param report_type: Report type. Always ``F`` which means final order
                        request.
    :type report_type: str
    :param reserved: Reserved. Not used.
    :type reserved: None
    :param location_ward: Location ward of specimen collection. Not used.
    :type location_ward: None
    :param infection_flag: Nosocomial infection flag. Not used.
    :type infection_flag: None
    :param specimen_service: Specimen service. Not used.
    :type specimen_service: None
    :param laboratory: Production laboratory. Not used.
    :type laboratory: None
    """
    # Server-dialect overrides of the shared order record:
    action_code = SetField(values=(None, 'Q'))  # None = normal result, Q = quality control
    instrument = ComponentField(Instrument)
    report_type = ConstantField(default='F')    # always a final order request
    test = ComponentField(Test)
class Result(CommonResult):
    """ASTM result record.
    :param type: Record Type ID. Always ``R``.
    :type type: str
    :param seq: Sequence Number. Required.
    :type seq: int
    :param test: Test information structure (aka Universal Test ID).
    :type test: :class:`Test`
    :param value: Measurement value. Numeric, coded or free text value
                  depending on result type. Required. Length: 1024.
    :type value: None
    :param units: Units. Length: 20.
    :type units: str
    :param references: Normal reference value interval.
    :type references: str
    :param abnormal_flag: Result abnormal flag. Possible values:
      - ``0``: normal result;
      - ``1``: result out of normal values;
      - ``2``: result out of attention values;
      - ``3``: result out of panic values;
      +10 Delta-check;
      +1000 Device alarm.
      Length: 4.
    :type abnormal_flag: str
    :param abnormality_nature: Nature of abnormality testing. Possible values:
      - ``N``: normal value;
      - ``L``: below low normal range;
      - ``H``: above high normal range;
      - ``LL``: below low critical range;
      - ``HH``: above high critical range.
    :type abnormality_nature: str
    :param status: Result status. ``F`` indicates a final result;
                   ``R`` indicating rerun. Length: 1.
    :type status: str
    :param normatives_changed_at: Date of changes in instrument normative
                                  values or units. Not used.
    :type normatives_changed_at: None
    :param operator: Operator ID.
    :type operator: :class:`Operator`
    :param created_at: Record creation date.
    :type created_at: datetime.date
    :param sampled_at: Specimen collection date.
    :type sampled_at: datetime.date
    :param started_at: When works on test was started on.
    :type started_at: datetime.datetime
    :param completed_at: When works on test was done (on LabOnline and on the
                         analyser).
    :type completed_at: :class:`CompletionDate`
    :param instrument: Instrument ID. Length: 16.
    :type instrument: str
    """
    # The ``abnormal_flag`` codes combine the base values 0-3 with +10
    # (delta-check) and +1000 (device alarm) offsets, hence the 16 values.
    abnormal_flag = SetField(
        field=IntegerField(),
        length=4,
        values=(0, 1, 2, 3,
                10, 11, 12, 13,
                1000, 1001, 1002, 1003,
                1010, 1011, 1012, 1013))
    abnormality_nature = SetField(values=('N', 'L', 'H', 'LL', 'HH'))
    completed_at = ComponentField(CompletionDate)
    created_at = DateField()
    instrument = TextField(length=16)
    operator = ComponentField(Operator)
    references = TextField()
    sampled_at = DateField()
    started_at = DateTimeField(required=True)
    status = SetField(values=('F', 'R'))
    test = ComponentField(Test)
    units = TextField(length=20)
class Comment(CommonComment):
    """ASTM comment record.
    :param type: Record Type ID. Always ``C``.
    :type type: str
    :param seq: Sequence Number. Required.
    :type seq: int
    :param source: Comment source. Always ``I``.
    :type source: str
    :param data: Comment data structure.
    :type data: :class:`CommentData`
    :param ctype: Comment type. Always ``G``.
    :type ctype: str
    """
    source = ConstantField(default='I')  # server dialect: comments originate from the instrument
    data = ComponentField(CommentData)
class RecordsDispatcher(BaseRecordsDispatcher):
    """Records dispatcher for the Omnilab/LabOnline dialect.

    On construction it replaces the base wrapper table so that each incoming
    raw record is wrapped by the dialect-specific mapping class.
    """

    def __init__(self, *args, **kwargs):
        super(RecordsDispatcher, self).__init__(*args, **kwargs)
        # ASTM record-type code -> wrapper mapping class.
        self.wrappers = dict(
            H=Header,
            P=Patient,
            O=Order,
            R=Result,
            C=Comment,
            L=Terminator,
        )
| Python |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Alexander Shorin
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
"""
``astm.omnilab.client`` - LabOnline client implementation
----------------------------------------------------------
"""
from astm.mapping import (
Component, ConstantField, ComponentField, IntegerField, DateTimeField,
RepeatedComponentField, SetField, TextField, NotUsedField
)
from .common import (
Header, Terminator,
CommonPatient,
CommonOrder,
CommonResult,
CommonComment,
Sender
)
# Public API of the LabOnline client dialect.
__all__ = ['Header', 'Patient', 'Order', 'Result', 'Comment', 'Terminator',
           'CommentData', 'PatientAge', 'Sender', 'Test']
#: Patient age structure.
#:
#: :param value: Age value.
#: :type value: int
#:
#: :param unit: Age unit. One of: ``years``, ``months``, ``days``.
#: :type unit: str
#:
PatientAge = Component.build(
    IntegerField(name='value'),                                # age magnitude
    SetField(name='unit', values=('years', 'months', 'days'))  # unit of the value above
)
#: Test :class:`~astm.mapping.Component` also known as Universal Test ID.
#:
#: :param _: Reserved. Not used.
#: :type _: None
#:
#: :param __: Reserved. Not used.
#: :type __: None
#:
#: :param ___: Reserved. Not used.
#: :type ___: None
#:
#: :param assay_code: Assay code. Required. Length: 20.
#: :type assay_code: str
#:
#: :param assay_name: Assay name. Length: 8.
#: :type assay_name: str
#:
# Field order follows the ASTM component positions documented in the comment
# table above; the first three components are reserved.
Test = Component.build(
    NotUsedField(name='_'),
    NotUsedField(name='__'),
    NotUsedField(name='___'),
    TextField(name='assay_code', required=True, length=20),
    TextField(name='assay_name', length=8),
)
#: Comment control data structure.
#:
CommentData = Component.build(
    # Comment control code: the client dialect only uses PC/RC/SC/TC
    # (the server dialect accepts a larger set).
    SetField(name='code', values=('PC', 'RC', 'SC', 'TC')),
    TextField(name='value')
)
class Patient(CommonPatient):
    """ASTM patient record.
    :param type: Record Type ID. Always ``P``.
    :type type: str
    :param seq: Sequence Number. Required.
    :type seq: int
    :param practice_id: Practice Assigned Patient ID. Required. Length: 12.
    :type practice_id: str
    :param laboratory_id: Laboratory Assigned Patient ID. Required. Length: 16.
    :type laboratory_id: str
    :param id: Patient ID. Not used.
    :type id: None
    :param name: Patient name.
    :type name: :class:`PatientName`
    :param maiden_name: Mother’s Maiden Name. Not used.
    :type maiden_name: None
    :param birthdate: Birthdate.
    :type birthdate: datetime.date
    :param sex: Patient Sex. One of: ``M`` (male), ``F`` (female),
                ``I`` (animal), ``None`` is unknown.
    :type sex: str
    :param race: Patient Race-Ethnic Origin. Not used.
    :type race: None
    :param address: Patient Address. Not used.
    :type address: None
    :param reserved: Reserved Field. Not used.
    :type reserved: None
    :param phone: Patient Telephone Number. Not used.
    :type phone: None
    :param physician_id: Attending Physician. Length: 35.
    :type physician_id: str
    :param special_1: Patient age structure.
    :type special_1: :class:`PatientAge`
    :param special_2: Patient source. Possible values:
      - ``0``: internal patient;
      - ``1``: external patient.
    :type special_2: int
    :param height: Patient Height. Not used.
    :type height: None
    :param weight: Patient Weight. Not used.
    :type weight: None
    :param diagnosis: Patient’s Known Diagnosis. Not used.
    :type diagnosis: None
    :param medications: Patient’s Active Medications. Not used.
    :type medications: None
    :param diet: Patient’s Diet. Not used.
    :type diet: None
    :param practice_1: Practice Field No. 1. Not used.
    :type practice_1: None
    :param practice_2: Practice Field No. 2. Not used.
    :type practice_2: None
    :param admission_date: Admission/Discharge Dates. Not used.
    :type admission_date: None
    :param admission_status: Admission Status. Not used.
    :type admission_status: None
    :param location: Patient location. Length: 20.
    :type location: str
    :param diagnostic_code_nature: Nature of diagnostic code. Not used.
    :type diagnostic_code_nature: None
    :param diagnostic_code: Diagnostic code. Not used.
    :type diagnostic_code: None
    :param religion: Patient religion. Not used.
    :type religion: None
    :param martial_status: Marital status. Not used.
    :type martial_status: None
    :param isolation_status: Isolation status. Not used.
    :type isolation_status: None
    :param language: Language. Not used.
    :type language: None
    :param hospital_service: Hospital service. Not used.
    :type hospital_service: None
    :param hospital_institution: Hospital institution. Not used.
    :type hospital_institution: None
    :param dosage_category: Dosage category. Not used.
    :type dosage_category: None
    """
    # Client-dialect overrides of the shared patient record:
    physician_id = TextField(length=35)
    special_1 = ComponentField(PatientAge)
class Order(CommonOrder):
    """ASTM order record.
    :param type: Record Type ID. Always ``O``.
    :type type: str
    :param seq: Sequence Number. Required.
    :type seq: int
    :param sample_id: Sample ID number. Required. Length: 12.
    :type sample_id: str
    :param instrument: Instrument specimen ID. Not used.
    :type instrument: None
    :param test: Test information structure (aka Universal Test ID).
    :type test: :class:`Test`
    :param priority: Priority flag. Required. Possible values:
      - ``S``: stat; - ``R``: routine.
    :type priority: str
    :param created_at: Ordered date and time. Required.
    :type created_at: datetime.datetime
    :param sampled_at: Specimen collection date and time.
    :type sampled_at: datetime.datetime
    :param collected_at: Collection end time. Not used.
    :type collected_at: None
    :param volume: Collection volume. Not used.
    :type volume: None
    :param collector: Collector ID. Not used.
    :type collector: None
    :param action_code: Action code. Required. Possible values:
      - ``C``: cancel works for specified tests;
      - ``A``: add tests to existed specimen;
      - ``N``: create new order;
      - ``R``: rerun tests for specified order;
    :type action_code: str
    :param danger_code: Danger code. Not used.
    :type danger_code: None
    :param clinical_info: Relevant clinical info. Not used.
    :type clinical_info: None
    :param delivered_at: Date/time specimen received.
    :type delivered_at: None
    :param biomaterial: Sample material code. Length: 20.
    :type biomaterial: str
    :param physician: Ordering Physician. Not used.
    :type physician: None
    :param physician_phone: Physician's phone number. Not used.
    :type physician_phone: None
    :param user_field_1: An optional field, it will be sent back unchanged to
                         the host along with the result. Length: 20.
    :type user_field_1: str
    :param user_field_2: An optional field, it will be sent back unchanged to
                         the host along with the result. Length: 1024.
    :type user_field_2: str
    :param laboratory_field_1: In multi-laboratory environment it will be used
                               to indicate which laboratory entering the order.
                               Length: 20.
    :type laboratory_field_1: str
    :param laboratory_field_2: Primary tube code. Length: 12.
    :type laboratory_field_2: str
    :param modified_at: Date and time of last result modification. Not used.
    :type modified_at: None
    :param instrument_charge: Instrument charge to computer system. Not used.
    :type instrument_charge: None
    :param instrument_section: Instrument section id. Not used.
    :type instrument_section: None
    :param report_type: Report type. Always ``O`` which means normal order
                        request.
    :type report_type: str
    :param reserved: Reserved. Not used.
    :type reserved: None
    :param location_ward: Location ward of specimen collection. Not used.
    :type location_ward: None
    :param infection_flag: Nosocomial infection flag. Not used.
    :type infection_flag: None
    :param specimen_service: Specimen service. Not used.
    :type specimen_service: None
    :param laboratory: Production laboratory: in multi-laboratory environment
                       indicates laboratory expected to process the order.
                       Length: 20.
    :type laboratory: str
    """
    # Client-dialect overrides of the shared order record:
    action_code = SetField(default='N', values=('C', 'A', 'N', 'R'))
    created_at = DateTimeField(required=True)
    laboratory = TextField(length=20)
    laboratory_field_1 = TextField(length=20)
    report_type = ConstantField(default='O')  # always a normal order request
    sampled_at = DateTimeField()
    test = RepeatedComponentField(Test)       # one order may carry several tests
class Result(CommonResult):
    """ASTM result record.
    :param type: Record Type ID. Always ``R``.
    :type type: str
    :param seq: Sequence Number. Required.
    :type seq: int
    :param test: Test information structure (aka Universal Test ID).
    :type test: :class:`Test`
    :param value: Measurement value. Numeric, coded or free text value
                  depending on result type. Required. Length: 1024.
    :type value: None
    :param units: Units. Not used.
    :type units: None
    :param references: Reference ranges. Not used.
    :type references: None
    :param abnormal_flag: Result abnormal flag. Not used.
    :type abnormal_flag: None
    :param abnormality_nature: Nature of abnormality testing. Not used.
    :type abnormality_nature: None
    :param status: Result status. Not used.
    :type status: None
    :param normatives_changed_at: Date of changes in instrument normative
                                  values or units. Not used.
    :type normatives_changed_at: None
    :param operator: Operator ID. Not used.
    :type operator: None
    :param started_at: When works on test was started on. Not used.
    :type started_at: None
    :param completed_at: When works on test was done. Required.
    :type completed_at: datetime.datetime
    :param instrument: Instrument ID. Not used.
    :type instrument: None
    """
    test = ComponentField(Test)
class Comment(CommonComment):
    """ASTM comment record.
    :param type: Record Type ID. Always ``C``.
    :type type: str
    :param seq: Sequence Number. Required.
    :type seq: int
    :param source: Comment source. Always ``L``.
    :type source: str
    :param data: Comment data structure.
    :type data: :class:`CommentData`
    :param ctype: Comment type. Always ``G``.
    :type ctype: str
    """
    source = ConstantField(default='L')  # client dialect: comments originate from the LIS side
    data = ComponentField(CommentData)
| Python |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Alexander Shorin
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
from . import client
from . import server
| Python |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Alexander Shorin
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
from astm import __version__
from astm.mapping import (
Component, ConstantField, ComponentField, DateField, DateTimeField,
IntegerField, SetField, TextField
)
from astm.records import (
HeaderRecord, PatientRecord, OrderRecord, ResultRecord, CommentRecord,
TerminatorRecord
)
#: Information about sender.
#:
#: :param name: Name.
#: :type name: str
#:
#: :param version: Sender software version.
#: :type version: str
#:
Sender = Component.build(
    TextField(name='name', default='python-astm'),  # sender software name
    TextField(name='version', default=__version__)  # sender software version
)
#: Patient name structure.
#:
#: :param last: Last name. Length: 50.
#: :type last: str
#:
#: :param first: First name. Length: 50.
#: :type first: str
#:
PatientName = Component.build(
    TextField(name='last', length=50),   # last (family) name
    TextField(name='first', length=50)   # first (given) name
)
class Header(HeaderRecord):
    r"""ASTM header record.
    :param type: Record Type ID. Always ``H``.
    :type type: str
    :param delimeter: Delimiter Definition. Always ``\^&``.
    :type delimeter: str
    :param message_id: Message Control ID. Not used.
    :type message_id: None
    :param password: Access Password. Not used.
    :type password: None
    :param sender: Information about sender. Optional.
    :type sender: :class:`Sender`
    :param address: Sender Street Address. Not used.
    :type address: None
    :param reserved: Reserved Field. Not used.
    :type reserved: None
    :param phone: Sender Telephone Number. Not used.
    :type phone: None
    :param chars: Sender Characteristics. Not used.
    :type chars: None
    :param receiver: Information about receiver. Not used.
    :type receiver: None
    :param comments: Comments. Not used.
    :type comments: None
    :param processing_id: Processing ID. Always ``P``.
    :type processing_id: str
    :param version: ASTM Version Number. Always ``E 1394-97``.
    :type version: str
    :param timestamp: Date and Time of Message
    :type timestamp: datetime.datetime
    """
    # Shared overrides: constant processing id and ASTM version.
    sender = ComponentField(Sender)
    processing_id = ConstantField(default='P')
    version = ConstantField(default='E 1394-97')
class CommonPatient(PatientRecord):
    """ASTM patient record.
    :param type: Record Type ID. Always ``P``.
    :type type: str
    :param seq: Sequence Number. Required.
    :type seq: int
    :param practice_id: Practice Assigned Patient ID. Required. Length: 12.
    :type practice_id: str
    :param laboratory_id: Laboratory Assigned Patient ID. Required. Length: 16.
    :type laboratory_id: str
    :param id: Patient ID. Not used.
    :type id: None
    :param name: Patient name.
    :type name: :class:`PatientName`
    :param maiden_name: Mother’s Maiden Name. Not used.
    :type maiden_name: None
    :param birthdate: Birthdate.
    :type birthdate: datetime.date
    :param sex: Patient Sex. One of: ``M`` (male), ``F`` (female),
                ``I`` (animal), ``None`` is unknown.
    :type sex: str
    :param race: Patient Race-Ethnic Origin. Not used.
    :type race: None
    :param address: Patient Address. Not used.
    :type address: None
    :param reserved: Reserved Field. Not used.
    :type reserved: None
    :param phone: Patient Telephone Number. Not used.
    :type phone: None
    :param physician_id: Attending Physician. Not used.
    :type physician_id: None
    :param special_1: Special Field #1. Not used.
    :type special_1: None
    :param special_2: Patient source. Possible values:
      - ``0``: internal patient;
      - ``1``: external patient.
    :type special_2: int
    :param height: Patient Height. Not used.
    :type height: None
    :param weight: Patient Weight. Not used.
    :type weight: None
    :param diagnosis: Patient’s Known Diagnosis. Not used.
    :type diagnosis: None
    :param medications: Patient’s Active Medications. Not used.
    :type medications: None
    :param diet: Patient’s Diet. Not used.
    :type diet: None
    :param practice_1: Practice Field No. 1. Not used.
    :type practice_1: None
    :param practice_2: Practice Field No. 2. Not used.
    :type practice_2: None
    :param admission_date: Admission/Discharge Dates. Not used.
    :type admission_date: None
    :param admission_status: Admission Status. Not used.
    :type admission_status: None
    :param location: Patient location. Length: 20.
    :type location: str
    :param diagnostic_code_nature: Nature of diagnostic code. Not used.
    :type diagnostic_code_nature: None
    :param diagnostic_code: Diagnostic code. Not used.
    :type diagnostic_code: None
    :param religion: Patient religion. Not used.
    :type religion: None
    :param martial_status: Marital status. Not used.
    :type martial_status: None
    :param isolation_status: Isolation status. Not used.
    :type isolation_status: None
    :param language: Language. Not used.
    :type language: None
    :param hospital_service: Hospital service. Not used.
    :type hospital_service: None
    :param hospital_institution: Hospital institution. Not used.
    :type hospital_institution: None
    :param dosage_category: Dosage category. Not used.
    :type dosage_category: None
    """
    # Field overrides shared by both the client and server dialects.
    birthdate = DateField()
    laboratory_id = TextField(required=True, length=16)
    location = TextField(length=20)
    name = ComponentField(PatientName)
    practice_id = TextField(required=True, length=12)
    sex = SetField(values=('M', 'F', None, 'I'))
    special_2 = SetField(values=(0, 1), field=IntegerField())
class CommonOrder(OrderRecord):
    """Order record fields shared by the LabOnline client and server
    dialects; the subclasses add their own overrides on top."""
    #: Sample material code. Length: 20.
    biomaterial = TextField(length=20)
    #: Primary tube code. Length: 12.
    laboratory_field_2 = TextField(length=12)
    #: Priority flag: ``S`` (stat) or ``R`` (routine); defaults to ``S``.
    priority = SetField(default='S', values=('S', 'R'))
    #: Sample ID number. Required. Length: 12.
    sample_id = TextField(required=True, length=12)
    #: Optional field sent back unchanged with the result. Length: 20.
    user_field_1 = TextField(length=20)
    #: Optional field sent back unchanged with the result. Length: 1024.
    user_field_2 = TextField(length=1024)
class CommonResult(ResultRecord):
    """Result record fields shared by the LabOnline client and server
    dialects."""
    #: Date and time the test was completed. Required.
    completed_at = DateTimeField(required=True)
    #: Measurement value. Required. Length: 20.
    #: NOTE(review): the Result docstrings elsewhere describe this field as
    #: "Length: 1024" — confirm which limit is correct.
    value = TextField(required=True, length=20)
class CommonComment(CommentRecord):
    """Comment record fields shared by the LabOnline client and server
    dialects."""
    #: Comment type. Always ``G``.
    ctype = ConstantField(default='G')
class Terminator(TerminatorRecord):
    """ASTM terminator record.
    :param type: Record Type ID. Always ``L``.
    :type type: str
    :param seq: Sequential number. Always ``1``.
    :type seq: int
    :param code: Termination code. Always ``N``.
    :type code: str
    """
    # No overrides: all fields keep the constant defaults declared on the
    # base ``TerminatorRecord``.
| Python |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012 Alexander Shorin
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
from .version import __version__, __version_info__
from .exceptions import BaseASTMError, NotAccepted, InvalidState
from .codec import (
decode, decode_message, decode_record,
encode, encode_message, encode_record,
make_checksum
)
from .mapping import Record, Component
from .records import (
HeaderRecord, PatientRecord, OrderRecord,
ResultRecord, CommentRecord, TerminatorRecord
)
from .protocol import ASTMProtocol
from .client import Client
from .server import RequestHandler, Server
import logging
# NOTE(review): this grabs the *root* logger, so the NullHandler below is
# attached application-wide; ``logging.getLogger(__name__)`` would scope it to
# the library — confirm intent before changing.
log = logging.getLogger()
class NullHandler(logging.Handler):
    """Do-nothing handler so that the library stays silent unless the host
    application configures logging (``logging.NullHandler`` provides the same
    behaviour in the stdlib since Python 2.7/3.1)."""
    def emit(self, *args, **kwargs):
        pass
# Discard log records by default; applications add their own handlers.
log.addHandler(NullHandler())
| Python |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Alexander Shorin
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
import sys

#: Dotted "major.minor" interpreter version string.  Kept for backward
#: compatibility with code that imports it from this module.
version = '.'.join(map(str, sys.version_info[:2]))

# Compare the version_info tuple, not the string: string comparison is
# lexicographic and would misorder e.g. '3.10' vs '3.9'.
if sys.version_info >= (3, 0):
    basestring = (str, bytes)
    unicode = str
    bytes = bytes
    long = int

    def buffer(obj, start=None, stop=None):
        """Return a copy of ``obj[start:stop]``, emulating the Python 2
        ``buffer`` builtin.

        ``memoryview(obj)`` is called purely for validation: it raises
        ``TypeError`` for objects that do not support the buffer protocol,
        matching the Python 2 behaviour.
        """
        memoryview(obj)  # validation only — rejects non-buffer objects
        if start is None:
            start = 0
        if stop is None:
            stop = len(obj)
        return obj[start:stop]
else:
    basestring = basestring
    unicode = unicode
    b = bytes = str
    long = long
    buffer = buffer


def b(s):
    """Coerce *s* to bytes, encoding text as Latin-1."""
    # A conditional expression rather than the ``and/or`` trick: with the
    # latter an *empty* text string would fall through and be returned
    # unencoded (b('') returned '' instead of b'').
    return s.encode('latin1') if isinstance(s, unicode) else s


def u(s):
    """Coerce *s* to text, decoding bytes as UTF-8."""
    return s.decode('utf-8') if isinstance(s, bytes) else s
| Python |
#====================================================================
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# ====================================================================
#
# This software consists of voluntary contributions made by many
# individuals on behalf of the Apache Software Foundation. For more
# information on the Apache Software Foundation, please see
# <http://www.apache.org/>.
#
import os
import re
import tempfile
import shutil
# Directories to skip while walking, and patterns selecting the files/lines
# the rewrite below operates on.
ignore_pattern = re.compile(r'^(.svn|target|bin|classes)')
java_pattern = re.compile(r'^.*\.java')
annot_pattern = re.compile(r'import org\.apache\.http\.annotation\.')

def process_dir(dir):
    """Recursively rewrite every .java file under *dir*, skipping VCS/build dirs."""
    for entry in os.listdir(dir):
        path = os.path.join(dir, entry)
        if os.path.isdir(path):
            if not ignore_pattern.match(entry):
                process_dir(path)
        elif java_pattern.match(entry):
            process_source(path)

def process_source(filename):
    """Rewrite org.apache.http.annotation imports to net.jcip.annotations.

    The file is rewritten through a temp file and only replaced when at
    least one import line actually changed.
    """
    tmpfd, tmpfile = tempfile.mkstemp()
    try:
        changed = False
        dst = os.fdopen(tmpfd, 'w')
        try:
            src = open(filename)
            try:
                for line in src:
                    if annot_pattern.match(line):
                        changed = True
                        line = line.replace('import org.apache.http.annotation.', 'import net.jcip.annotations.')
                    dst.write(line)
            finally:
                src.close()
        finally:
            dst.close()
        if changed:
            shutil.move(tmpfile, filename)
        else:
            os.remove(tmpfile)
    except Exception:
        # Clean up the temp file, but re-raise: the original bare ``except``
        # silently swallowed every error, hiding real failures.
        if os.path.exists(tmpfile):
            os.remove(tmpfile)
        raise
process_dir('.')
| Python |
#!/usr/bin/env python
# Standard pre-1.4 Django management entry point: delegates command-line
# handling to Django using the sibling ``settings`` module.
from django.core.management import execute_manager
try:
    import settings # Assumed to be in the same directory.
except ImportError:
    import sys
    sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n(If the file settings.py does indeed exist, it's causing an ImportError somehow.)\n" % __file__)
    sys.exit(1)
if __name__ == "__main__":
    execute_manager(settings)
| Python |
import os
import sys
import logging
# Point Django at the local settings module and make sibling imports resolve.
os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'
sys.path.insert(0, os.path.abspath(os.path.dirname(__file__)))
# Google App Engine imports.
from google.appengine.ext.webapp import util
# Force Django to reload its settings.
from django.conf import settings
settings._target = None
import django.core.handlers.wsgi
import django.core.signals
import django.db
import django.dispatch.dispatcher
logging.getLogger().setLevel(logging.DEBUG)
# App Engine has no usable Django DB connection, so detach the handler that
# would try to roll back transactions when a request raises.
# NOTE(review): this uses the old pre-1.0 Django signals/dispatcher API.
django.dispatch.dispatcher.disconnect(
    django.db._rollback_on_exception,
    django.core.signals.got_request_exception)
def main():
    """CGI entry point: run the Django WSGI application under App Engine."""
    application = django.core.handlers.wsgi.WSGIHandler()
    util.run_wsgi_app(application)
if __name__ == '__main__':
    main()
| Python |
#!/usr/bin/env python
# Standard pre-1.4 Django management entry point: delegates command-line
# handling to Django using the sibling ``settings`` module.
from django.core.management import execute_manager
try:
    import settings # Assumed to be in the same directory.
except ImportError:
    import sys
    sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n(If the file settings.py does indeed exist, it's causing an ImportError somehow.)\n" % __file__)
    sys.exit(1)
if __name__ == "__main__":
    execute_manager(settings)
| Python |
from django.conf.urls.defaults import *
# Route table for the favicon app (old-style string view references).
urlpatterns = patterns('',
    # Example:
    # (r'^16x16/', include('16x16.foo.urls')),
    # Uncomment this for admin:
    (r'^send/', 'favicon.views.receiver'),
    (r'^contrib/', 'favicon.views.contrib'),
    (r'^toggler/', 'favicon.views.toggler'),
    (r'^update/', 'favicon.views.update'),
    (r'^top/', 'favicon.views.top_x'),
    # Paging API keyed by a full timestamp (see views.api / datetime_url).
    (r'^api/(?P<year>\d{4})/(?P<month>\d{2})/(?P<day>\d{2})/(?P<hour>\d{2})/(?P<min>\d{2})/(?P<sec>\d{2})/(?P<micro>\d+)/', 'favicon.views.api'),
    (r'^image/(?P<id>.+)', 'favicon.views.image'),
    (r'^toggle/(?P<id>.+)', 'favicon.views.toggle_active'),
    (r'^$', 'favicon.views.index')
)
| Python |
#from django.db import models
from google.appengine.ext import db
# A stored favicon image, keyed by an md5-derived key_name (see md5_key()
# in the helpers module): raw bytes plus usage/moderation metadata.
class Favicon(db.Model):
    mimetype = db.StringProperty(required=True)
    # Raw image data (uploads over 50kB are rejected by the receiver view).
    favicon_bytes = db.BlobProperty(required=True)
    # Moderation flag: inactive favicons are excluded from the listings.
    active = db.BooleanProperty(default=True)
    # Number of times this favicon has been reported.
    accesses = db.IntegerProperty(default=0)
    created_at = db.DateTimeProperty(auto_now_add=True)
    modified_at = db.DateTimeProperty(auto_now=True)
# Dominant RGB color of a favicon -- presumably to be populated once an
# imaging library is available (not written anywhere visible here).
class FaviconColor(db.Model):
    favicon = db.ReferenceProperty(Favicon)
    r = db.IntegerProperty(default=0)
    g = db.IntegerProperty(default=0)
    b = db.IntegerProperty(default=0)
# Maps a reported URI (used as key_name by the receiver view) to the
# favicon found there.
class FaviconURI(db.Model):
    uri = db.StringProperty(required=True)
    favicon = db.ReferenceProperty(Favicon)
    created_at = db.DateTimeProperty(auto_now_add=True)
    modified_at = db.DateTimeProperty(auto_now=True)
# A reporting client, identified by the clientID value it posts.
class Client(db.Model):
    client_id = db.StringProperty(required=True)
    created_at = db.DateTimeProperty(auto_now_add=True)
    modified_at = db.DateTimeProperty(auto_now=True)
# One favicon report event: which favicon, from which URI, by which client.
class Access(db.Model):
    created_at = db.DateTimeProperty(auto_now_add=True)
    favicon = db.ReferenceProperty(Favicon)
    favicon_uri = db.ReferenceProperty(FaviconURI)
    client = db.ReferenceProperty(Client)
# stats objects
# All-time running counter (e.g. "total_favicons"), addressed by key_name.
class CountStat(db.Expando):
    count = db.IntegerProperty(default=0)
    type = db.StringProperty()
    since = db.DateTimeProperty()
# Per-day counter, addressed by key_name "<type>_YYYYMMDD" (see helpers).
class DateCountStat(db.Expando):
    date = db.DateTimeProperty()
    type = db.StringProperty()
    count = db.IntegerProperty(default=0)
| Python |
from google.appengine.ext import db
from google.appengine.ext.db import djangoforms
import django
from django import http
from django import shortcuts
from django.core import serializers
from favicon.models import Favicon, FaviconURI, Client, Access
# as soon as we have an imaging library...
| Python |
from favicon.models import Favicon, FaviconURI, Client, Access, CountStat, DateCountStat
from datetime import datetime
def inc_total_favicons():
    """Bump the all-time favicon counter (creating it on first use)."""
    total_favicons = CountStat.get_or_insert("total_favicons")
    total_favicons.count += 1
    total_favicons.put()

def get_total_favicons():
    """Return the all-time favicon count, or 0 if never incremented."""
    total_favicons = CountStat.get_by_key_name("total_favicons")
    # get_by_key_name returns None when the entity does not exist yet;
    # the original crashed with AttributeError in that case.
    return total_favicons.count if total_favicons else 0

def inc_total_accesses():
    """Bump the all-time access counter (creating it on first use)."""
    total_accesses = CountStat.get_or_insert("total_accesses")
    total_accesses.count += 1
    total_accesses.put()

def get_total_accesses():
    """Return the all-time access count, or 0 if never incremented."""
    total_accesses = CountStat.get_by_key_name("total_accesses")
    return total_accesses.count if total_accesses else 0
def inc_today_accesses():
    """Bump today's access counter (creating today's row on first use)."""
    today = datetime.today()
    total_accesses_today = DateCountStat.get_or_insert("accesses_%04d%02d%02d" % (today.year, today.month, today.day), date=today)
    total_accesses_today.type = "accesses"
    total_accesses_today.count += 1
    total_accesses_today.put()

def get_today_accesses():
    """Return today's access count, or 0 if nothing was counted today."""
    today = datetime.today()
    total_accesses_today = DateCountStat.get_by_key_name("accesses_%04d%02d%02d" % (today.year, today.month, today.day))
    # get_by_key_name returns None when today's row does not exist yet;
    # the original crashed with AttributeError in that case.
    return total_accesses_today.count if total_accesses_today else 0

def inc_today_favicons():
    """Bump today's new-favicon counter (creating today's row on first use)."""
    today = datetime.today()
    total_favicons_today = DateCountStat.get_or_insert("favicons_%04d%02d%02d" % (today.year, today.month, today.day), date=today)
    total_favicons_today.type = "favicons"
    total_favicons_today.count += 1
    total_favicons_today.put()

def get_today_favicons():
    """Return today's new-favicon count, or 0 if none yet today."""
    today = datetime.today()
    total_favicons_today = DateCountStat.get_by_key_name("favicons_%04d%02d%02d" % (today.year, today.month, today.day))
    return total_favicons_today.count if total_favicons_today else 0
def get_num_favicons(num):
    """Return the *num* most recent daily favicon counts as strings."""
    stats = DateCountStat.all().filter('type =', 'favicons').order('-date').fetch(num)
    return [str(stat.count) for stat in stats]
def inc_today_updates():
    """Bump today's update-ping counter (creating today's row on first use)."""
    today = datetime.today()
    total_updates_today = DateCountStat.get_or_insert("updates_%04d%02d%02d" % (today.year, today.month, today.day), date=today)
    total_updates_today.type = "updates"
    total_updates_today.count += 1
    total_updates_today.put()

def get_today_updates():
    """Return today's update-ping count, or 0 if none yet today."""
    today = datetime.today()
    total_updates_today = DateCountStat.get_by_key_name("updates_%04d%02d%02d" % (today.year, today.month, today.day))
    # None-guard: the counter may not exist yet for today (the original
    # crashed with AttributeError in that case).
    return total_updates_today.count if total_updates_today else 0
# favicon fetch methods
def most_recently_accessed(num):
    # Latest Access events (callers join back through .favicon).
    return Access.all().order('-created_at').fetch(num)
def most_accessed(num):
    # Active favicons ranked by their access counter.
    return Favicon.all().filter('active = ', True).order('-accesses').fetch(num)
def most_recently_added(num):
    # Active favicons, newest first.
    return Favicon.all().filter('active = ', True).order('-created_at').fetch(num)
def datetime_url(dt):
    """Build the /api/<timestamp>/ URL path used for result paging."""
    fields = (dt.year, dt.month, dt.day, dt.hour, dt.minute,
              dt.second, dt.microsecond)
    return "/api/{0:04d}/{1:02d}/{2:02d}/{3:02d}/{4:02d}/{5:02d}/{6}/".format(*fields)
# Datastore key_names may not begin with a digit, and md5 hex digests can --
# prefix them so the key always starts with a letter.
def md5_key(md5):
    """Return *md5* prefixed with 'a' for use as a datastore key_name."""
    return "a%s" % md5
| Python |
import logging
import sys
from google.appengine.ext import db
from google.appengine.ext.db import djangoforms
import django
from django import http
from django import shortcuts
from datetime import datetime
from urllib import quote
from favicon.models import Favicon, FaviconURI, Client, Access, CountStat, DateCountStat
from favicon.helpers import *
def image(request, id):
    """Serve the stored favicon bytes for datastore key_name *id*.

    NOTE(review): assumes the key exists -- a missing id raises
    AttributeError (HTTP 500) rather than returning 404; confirm intended.
    """
    mimetype = "image/png"
    favicon = Favicon.get_by_key_name(id)
    if favicon.mimetype: mimetype = favicon.mimetype
    return http.HttpResponse(favicon.favicon_bytes, mimetype=mimetype)
def receiver(request):
    """Accept a favicon upload (multipart POST) and record the access.

    Expects FILES['favicon'] (with 'content'/'content-type'),
    POST['faviconMD5'], and optionally POST['faviconURI'] / POST['clientID'].
    The blanket except logs and falls through to a normal response, so
    uploaders never see an error -- presumably intentional best-effort.
    """
    params = {}
    # logging.debug("file contents: " + str(request.FILES))
    try:
        # Reject oversized uploads.
        if (len(request.FILES['favicon']['content']) > 51200):
            logging.error("someone posted a favicon thats over 50kB (most are < 1kB), no thank you!")
            # NOTE(review): returns None here (no HttpResponse) -- confirm.
            return
        # hack, key_name cannot begin with a digit
        md5 = md5_key(request.POST['faviconMD5'][:32])
        # Dedupe by md5: reuse an existing favicon or store a new one.
        f = Favicon.get_or_insert(md5,
            mimetype = request.FILES['favicon']['content-type'],
            favicon_bytes = request.FILES['favicon']['content'])
        # inc total_favicons
        if not f.accesses:
            inc_total_favicons()
            inc_today_favicons()
        f.accesses += 1
        f.put()
        inc_total_accesses()
        inc_today_accesses()
        fu = None
        if request.POST.has_key('faviconURI'):
            fu = FaviconURI(key_name = request.POST['faviconURI'],
                uri = request.POST['faviconURI'],
                favicon = f)
            fu.put()
        c = None
        if request.POST.has_key('clientID'):
            c = Client(key_name = request.POST['clientID'],
                client_id = request.POST['clientID'])
            c.put()
        # Record the individual access event.
        a = Access(favicon=f, favicon_uri=fu, client=c)
        a.put()
    except:
        logging.error("Unexpected error: " + str(sys.exc_info()))
        logging.error(request.FILES)
    return shortcuts.render_to_response('index.html', params)
def contrib(request):
    """Render the static contribution-instructions page."""
    return shortcuts.render_to_response('contrib.html', {})
def toggle_active(request, id):
    """Flip the moderation flag of favicon *id*, then redirect to /toggler/.

    NOTE(review): no auth check is visible here -- presumably restricted
    elsewhere (e.g. app.yaml); confirm.
    """
    favicon = Favicon.get_by_key_name(id)
    favicon.active = not favicon.active
    favicon.put()
    return http.HttpResponseRedirect("/toggler/")
def toggler(request):
    """Render the moderation page listing active and disabled favicon keys."""
    context = {}
    enabled = Favicon.all().filter('active = ', True)
    context['favicons_active'] = [fav.key().id_or_name() for fav in enabled]
    disabled = Favicon.all().filter('active = ', False)
    context['favicons_disabled'] = [fav.key().id_or_name() for fav in disabled]
    return shortcuts.render_to_response('toggler.html', context)
# TODO: implement charts for data
def get_sparkline(data):
    """Return a Google Chart API URL plotting *data* as a sparkline.

    Values are scaled to percentages of the maximum value.  The original
    joined the raw ``data`` list (a TypeError for numeric input) instead
    of the scaled ``vals`` -- hence its "not working" note; fixed here.
    Empty and all-zero inputs are handled instead of raising.
    """
    if not data:
        scaled = []
    else:
        # ``or 1.0`` avoids ZeroDivisionError when every value is zero.
        max_val = float(max(data)) or 1.0
        scaled = [str((d / max_val) * 100) for d in data]
    data_string = ",".join(scaled)
    img_string = "http://chart.apis.google.com/chart?chs=100x20&cht=ls&chco=0077CC&chm=B,E6F2FA,0,0,0&chls=1,0,0&chd=t:%s" % (data_string)
    return img_string
def top_x(request):
    """Render the leaderboard page: newest, recently-accessed and
    most-accessed favicons plus headline counters."""
    params = {}
    params['most_recently_created'] = [ f.key().id_or_name() for f in most_recently_added(10) ]
    # most_recently_accessed() returns Access entities -- join via .favicon.
    params['most_recently_accessed'] = [ f.favicon.key().id_or_name() for f in most_recently_accessed(10) ]
    params['most_accessed'] = [ f.key().id_or_name() for f in most_accessed(10) ]
    params['favicon_cnt'] = get_total_favicons()
    params['favicon_today_cnt'] = get_today_favicons()
    params['accesses_today_cnt'] = get_today_accesses()
    return shortcuts.render_to_response('top.html', params)
def index(request):
    """Render the landing page with the headline counters."""
    params = {}
    params['favicon_cnt'] = get_total_favicons()
    params['accesses_cnt'] = get_total_accesses()
    params['favicon_today_cnt'] = get_today_favicons()
    params['accesses_today_cnt'] = get_today_accesses()
    return shortcuts.render_to_response('gears.html', params)
def update(request):
    """Count an update ping, then redirect to the static update.rdf file."""
    inc_today_updates()
    return http.HttpResponseRedirect('/update/update.rdf')
def api(request, year, month, day, hour, min, sec, micro):
    """List active favicons created strictly after the given timestamp.

    Returns up to 1000 keys plus a ``next_url`` anchored just past the
    last result, so clients page through by repeatedly following next_url.
    """
    params = {}
    dt = datetime(int(year), int(month), int(day), int(hour), int(min), int(sec), int(micro))
    favicons = Favicon.all().filter('active = ', True).filter('created_at > ', dt).order('created_at').fetch(1000)
    if not favicons:
        # No newer favicons: keep the client polling at the same position.
        params['favicons'] = []
        params['next_url'] = datetime_url(dt)
        return shortcuts.render_to_response('api.html', params)
    keys = [ quote(f.key().id_or_name()) for f in favicons ]
    next_url = datetime_url(favicons[-1].created_at)
    params['favicons'] = keys
    params['next_url'] = next_url
    return shortcuts.render_to_response('api.html', params)
| Python |
#!/usr/bin/env python
"""
tesshelper.py -- Utility operations to compare, report stats, and copy
public headers for tesseract 3.0x VS2008 Project
$RCSfile: tesshelper.py,v $ $Revision: 7ca575b377aa $ $Date: 2012/03/07 17:26:31 $
"""
r"""
Requires:
python 2.7 or greater: activestate.com
http://www.activestate.com/activepython/downloads
because using the new argparse module and new literal set syntax (s={1, 2}) .
General Notes:
--------------
Format for a .vcproj file entry:
<File
RelativePath="..\src\allheaders.h"
>
</File>
"""
epilogStr = r"""
Examples:
Assume that tesshelper.py is in c:\buildfolder\tesseract-3.02\vs2008,
which is also the current directory. Then,
python tesshelper .. compare
will compare c:\buildfolder\tesseract-3.02 "library" directories to the
libtesseract Project
(c:\buildfolder\tesseract-3.02\vs2008\libtesseract\libtesseract.vcproj).
python tesshelper .. report
will display summary stats for c:\buildfolder\tesseract-3.02 "library"
directories and the libtesseract Project.
python tesshelper .. copy ..\..\include
will copy all "public" libtesseract header files to
c:\buildfolder\include.
python tesshelper .. clean
will clean the vs2008 folder of all build directories, and .user, .suo,
.ncb, and other temp files.
"""
# imports of python standard library modules
# See Python Documentation | Library Reference for details
import collections
import glob
import argparse
import os
import re
import shutil
import sys
# ====================================================================
# Script version: "1.0 <RCS date>".
VERSION = "1.0 %s" % "$Date: 2012/03/07 17:26:31 $".split()[1]
# Location of the VS2008 project, relative to tessDir.
PROJ_SUBDIR = r"vs2008\libtesseract"
PROJFILE = "libtesseract.vcproj"
# Output files written by the 'compare' command.
NEWHEADERS_FILENAME = "newheaders.txt"
NEWSOURCES_FILENAME = "newsources.txt"
# Template for one <File> entry in a .vcproj, filled with a relative path.
fileNodeTemplate = \
''' <File
RelativePath="..\..\%s"
>
</File>
'''
# ====================================================================
def getProjectfiles(libTessDir, libProjectFile, nTrimChars):
    """Return sets of all, c, h, and resources files in libtesseract Project"""
    #extract filenames of header & source files from the .vcproj
    projectCFiles = set()
    projectHFiles = set()
    projectRFiles = set()
    projectFilesSet = set()
    f = open(libProjectFile, "r")
    data = f.read()
    f.close()
    # Every file reference in a .vcproj appears as a RelativePath="..." attribute.
    projectFiles = re.findall(r'(?i)RelativePath="(\.[^"]+)"', data)
    for projectFile in projectFiles:
        root, ext = os.path.splitext(projectFile.lower())
        if ext == ".c" or ext == ".cpp":
            projectCFiles.add(projectFile)
        elif ext == ".h":
            projectHFiles.add(projectFile)
        elif ext == ".rc":
            projectRFiles.add(projectFile)
        else:
            print "unknown file type: %s" % projectFile
        # Normalize to an absolute path, then trim to a lowercased
        # tessDir-relative path so it can be compared with getTessLibFiles().
        relativePath = os.path.join(libTessDir, projectFile)
        relativePath = os.path.abspath(relativePath)
        relativePath = relativePath[nTrimChars:].lower()
        projectFilesSet.add(relativePath)
    return projectFilesSet, projectHFiles, projectCFiles, projectRFiles
def getTessLibFiles(tessDir, nTrimChars):
    """Return the set of all libtesseract source/header files under *tessDir*.

    Paths are lowercased and made tessDir-relative by dropping the first
    *nTrimChars* characters of each absolute path.
    """
    # Sub-directories that make up the libtesseract library.
    libDirs = [
        "api",
        "ccmain",
        "ccstruct",
        "ccutil",
        "classify",
        "cube",
        "cutil",
        "dict",
        r"neural_networks\runtime",
        "opencl",
        "textord",
        "viewer",
        "wordrec",
        #"training",
        r"vs2008\port",
        r"vs2008\libtesseract",
    ]
    tessFiles = set()
    filetypes = ("*.c", "*.cpp", "*.h", "*.rc")
    for libDir in libDirs:
        baseDir = os.path.join(tessDir, libDir)
        for filetype in filetypes:
            for match in glob.glob(os.path.join(baseDir, filetype)):
                absPath = os.path.abspath(match)
                tessFiles.add(absPath[nTrimChars:].lower())
    return tessFiles
# ====================================================================
def tessCompare(tessDir):
    '''Compare libtesseract Project files and actual "sub-library" files.

    Prints files present on disk but missing from the project (writing
    ready-to-paste <File> nodes for them to newheaders.txt/newsources.txt),
    then lists project entries whose files no longer exist on disk.
    '''
    vs2008Dir = os.path.join(tessDir, "vs2008")
    libTessDir = os.path.join(vs2008Dir, "libtesseract")
    libProjectFile = os.path.join(libTessDir,"libtesseract.vcproj")
    tessAbsDir = os.path.abspath(tessDir)
    # Characters to strip so absolute paths become tessDir-relative.
    nTrimChars = len(tessAbsDir)+1
    print 'Comparing VS2008 Project "%s" with\n "%s"' % (libProjectFile,
        tessAbsDir)
    projectFilesSet, projectHFiles, projectCFiles, projectRFiles = \
        getProjectfiles(libTessDir, libProjectFile, nTrimChars)
    tessFiles = getTessLibFiles(tessDir, nTrimChars)
    # Files on disk that the project does not reference yet.
    extraFiles = tessFiles - projectFilesSet
    print "%2d Extra files (in %s but not in Project)" % (len(extraFiles),
        tessAbsDir)
    headerFiles = []
    sourceFiles = []
    sortedList = list(extraFiles)
    sortedList.sort()
    for filename in sortedList:
        root, ext = os.path.splitext(filename.lower())
        if ext == ".h":
            headerFiles.append(filename)
        else:
            sourceFiles.append(filename)
        print " %s " % filename
    print
    print "%2d new header file items written to %s" % (len(headerFiles),
        NEWHEADERS_FILENAME)
    headerFiles.sort()
    with open(NEWHEADERS_FILENAME, "w") as f:
        for filename in headerFiles:
            f.write(fileNodeTemplate % filename)
    print "%2d new source file items written to %s" % (len(sourceFiles),
        NEWSOURCES_FILENAME)
    sourceFiles.sort()
    with open(NEWSOURCES_FILENAME, "w") as f:
        for filename in sourceFiles:
            f.write(fileNodeTemplate % filename)
    print
    # Project entries whose files have been deleted or moved on disk.
    deadFiles = projectFilesSet - tessFiles
    print "%2d Dead files (in Project but not in %s" % (len(deadFiles),
        tessAbsDir)
    sortedList = list(deadFiles)
    sortedList.sort()
    for filename in sortedList:
        print " %s " % filename
# ====================================================================
def tessReport(tessDir):
    """Report summary stats on "sub-library" files and libtesseract Project file."""
    vs2008Dir = os.path.join(tessDir, "vs2008")
    libTessDir = os.path.join(vs2008Dir, "libtesseract")
    libProjectFile = os.path.join(libTessDir,"libtesseract.vcproj")
    tessAbsDir = os.path.abspath(tessDir)
    nTrimChars = len(tessAbsDir)+1
    projectFilesSet, projectHFiles, projectCFiles, projectRFiles = \
        getProjectfiles(libTessDir, libProjectFile, nTrimChars)
    tessFiles = getTessLibFiles(tessDir, nTrimChars)
    print 'Summary stats for "%s" library directories' % tessAbsDir
    # Tally file extensions per directory.
    folderCounters = {}
    for tessFile in tessFiles:
        tessFile = tessFile.lower()
        folder, head = os.path.split(tessFile)
        file, ext = os.path.splitext(head)
        typeCounter = folderCounters.setdefault(folder, collections.Counter())
        typeCounter[ext[1:]] += 1
    folders = folderCounters.keys()
    folders.sort()
    totalFiles = 0
    totalH = 0
    totalCPP = 0
    totalOther = 0
    print
    print " total h cpp"
    print " ----- --- ---"
    for folder in folders:
        counters = folderCounters[folder]
        nHFiles = counters['h']
        nCPPFiles = counters['cpp']
        total = nHFiles + nCPPFiles
        totalFiles += total
        totalH += nHFiles
        totalCPP += nCPPFiles
        print " %5d %3d %3d %s" % (total, nHFiles, nCPPFiles, folder)
    print " ----- --- ---"
    print " %5d %3d %3d" % (totalFiles, totalH, totalCPP)
    print
    print 'Summary stats for VS2008 Project "%s"' % libProjectFile
    print " %5d %s" %(len(projectHFiles), "Header files")
    print " %5d %s" % (len(projectCFiles), "Source files")
    print " %5d %s" % (len(projectRFiles), "Resource files")
    print " -----"
    print " %5d" % (len(projectHFiles) + len(projectCFiles) + len(projectRFiles), )
# ====================================================================
def copyIncludes(fileSet, description, tessDir, includeDir):
"""Copy set of files to specified include dir."""
print
print 'Copying libtesseract "%s" headers to %s' % (description, includeDir)
print
sortedList = list(fileSet)
sortedList.sort()
count = 0
errList = []
for includeFile in sortedList:
filepath = os.path.join(tessDir, includeFile)
if os.path.isfile(filepath):
shutil.copy2(filepath, includeDir)
print "Copied: %s" % includeFile
count += 1
else:
print '***Error: "%s" doesn\'t exist"' % filepath
errList.append(filepath)
print '%d header files successfully copied to "%s"' % (count, includeDir)
if len(errList):
print "The following %d files were not copied:"
for filepath in errList:
print " %s" % filepath
def tessCopy(tessDir, includeDir):
    '''Copy all "public" libtesseract Project header files to include directory.
    Preserves directory hierarchy.'''
    # Header groups, named after the public header that drags them in.
    baseIncludeSet = {
        r"api\baseapi.h",
        r"api\capi.h",
        r"api\apitypes.h",
        r"ccstruct\publictypes.h",
        r"ccmain\thresholder.h",
        r"ccutil\host.h",
        r"ccutil\basedir.h",
        r"ccutil\tesscallback.h",
        r"ccutil\unichar.h",
        r"ccutil\platform.h",
    }
    strngIncludeSet = {
        r"ccutil\strngs.h",
        r"ccutil\memry.h",
        r"ccutil\host.h",
        r"ccutil\serialis.h",
        r"ccutil\errcode.h",
        r"ccutil\fileerr.h",
        #r"ccutil\genericvector.h",
    }
    resultIteratorIncludeSet = {
        r"ccmain\ltrresultiterator.h",
        r"ccmain\pageiterator.h",
        r"ccmain\resultiterator.h",
        r"ccutil\genericvector.h",
        r"ccutil\tesscallback.h",
        r"ccutil\errcode.h",
        r"ccutil\host.h",
        r"ccutil\helpers.h",
        r"ccutil\ndminx.h",
        r"ccutil\params.h",
        r"ccutil\unicharmap.h",
        r"ccutil\unicharset.h",
    }
    genericVectorIncludeSet = {
        r"ccutil\genericvector.h",
        r"ccutil\tesscallback.h",
        r"ccutil\errcode.h",
        r"ccutil\host.h",
        r"ccutil\helpers.h",
        r"ccutil\ndminx.h",
    }
    blobsIncludeSet = {
        r"ccstruct\blobs.h",
        r"ccstruct\rect.h",
        r"ccstruct\points.h",
        r"ccstruct\ipoints.h",
        r"ccutil\elst.h",
        r"ccutil\host.h",
        r"ccutil\serialis.h",
        r"ccutil\lsterr.h",
        r"ccutil\ndminx.h",
        r"ccutil\tprintf.h",
        r"ccutil\params.h",
        r"viewer\scrollview.h",
        r"ccstruct\vecfuncs.h",
    }
    # Non-header support files copied to includeDir itself.
    extraFilesSet = {
        #r"vs2008\include\stdint.h",
        r"vs2008\include\leptonica_versionnumbers.vsprops",
        r"vs2008\include\tesseract_versionnumbers.vsprops",
    }
    tessIncludeDir = os.path.join(includeDir, "tesseract")
    if os.path.isfile(tessIncludeDir):
        print 'Aborting: "%s" is a file not a directory.' % tessIncludeDir
        return
    if not os.path.exists(tessIncludeDir):
        os.mkdir(tessIncludeDir)
    #fileSet = baseIncludeSet | strngIncludeSet | genericVectorIncludeSet | blobsIncludeSet
    fileSet = baseIncludeSet | strngIncludeSet | resultIteratorIncludeSet
    copyIncludes(fileSet, "public", tessDir, tessIncludeDir)
    copyIncludes(extraFilesSet, "extra", tessDir, includeDir)
# ====================================================================
def tessClean(tessDir):
    '''Clean vs2008 folder of all build directories and certain temp files.'''
    vs2008Dir = os.path.join(tessDir, "vs2008")
    vs2008AbsDir = os.path.abspath(vs2008Dir)
    # Interactive confirmation; proceed only on an explicit "yes".
    answer = raw_input(
        'Are you sure you want to clean the\n "%s" folder (Yes/No) [No]? ' %
        vs2008AbsDir)
    if answer.lower() not in ("yes",):
        return
    answer = raw_input('Only list the items to be deleted (Yes/No) [Yes]? ')
    answer = answer.strip()
    # Default to a dry run unless the user explicitly answers "no".
    listOnly = answer.lower() not in ("no",)
    for rootDir, dirs, files in os.walk(vs2008AbsDir):
        for buildDir in ("LIB_Release", "LIB_Debug", "DLL_Release", "DLL_Debug"):
            if buildDir in dirs:
                # Prune so os.walk does not descend into the removed tree.
                dirs.remove(buildDir)
                absBuildDir = os.path.join(rootDir, buildDir)
                if listOnly:
                    print "Would remove: %s" % absBuildDir
                else:
                    print "Removing: %s" % absBuildDir
                    shutil.rmtree(absBuildDir)
        if rootDir == vs2008AbsDir:
            # At the top level, keep only the solution, this script, readme.
            for file in files:
                if file.lower() not in ("tesseract.sln",
                                        "tesshelper.py",
                                        "readme.txt"):
                    absPath = os.path.join(rootDir, file)
                    if listOnly:
                        print "Would remove: %s" % absPath
                    else:
                        print "Removing: %s" % absPath
                        os.remove(absPath)
        else:
            # Below the top level, remove only IDE temp/backup files.
            for file in files:
                root, ext = os.path.splitext(file)
                if ext.lower() in (".suo",
                                   ".ncb",
                                   ".user",
                                   ) or (
                                   len(ext)>0 and ext[-1] == "~"):
                    absPath = os.path.join(rootDir, file)
                    if listOnly:
                        print "Would remove: %s" % absPath
                    else:
                        print "Removing: %s" % absPath
                        os.remove(absPath)
# ====================================================================
def validateTessDir(tessDir):
    """argparse type-checker: *tessDir* must hold the VS2008 project file."""
    if os.path.isdir(tessDir):
        projFile = os.path.join(tessDir, PROJ_SUBDIR, PROJFILE)
        if os.path.isfile(projFile):
            return tessDir
        raise argparse.ArgumentTypeError('Project file "%s" doesn\'t exist.' % projFile)
    raise argparse.ArgumentTypeError('Directory "%s" doesn\'t exist.' % tessDir)
def validateDir(dir):
    """argparse type-checker: *dir* must exist and be named 'include'."""
    if not os.path.isdir(dir):
        raise argparse.ArgumentTypeError('Directory "%s" doesn\'t exist.' % dir)
    tail = os.path.basename(os.path.abspath(dir))
    if tail.lower() != "include":
        raise argparse.ArgumentTypeError('Include directory "%s" must be named "include".' % tail)
    return dir
def main ():
    """Parse the command line and dispatch to the chosen sub-command."""
    parser = argparse.ArgumentParser(
        epilog=epilogStr,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument("--version", action="version",
        version="%(prog)s " + VERSION)
    parser.add_argument('tessDir', type=validateTessDir,
        help="tesseract installation directory")
    subparsers = parser.add_subparsers(
        dest="subparser_name",
        title="Commands")
    parser_changes = subparsers.add_parser('compare',
        help="compare libtesseract Project with tessDir")
    parser_changes.set_defaults(func=tessCompare)
    parser_report = subparsers.add_parser('report',
        help="report libtesseract summary stats")
    parser_report.set_defaults(func=tessReport)
    parser_copy = subparsers.add_parser('copy',
        help="copy public libtesseract header files to includeDir")
    parser_copy.add_argument('includeDir', type=validateDir,
        help="Directory to copy header files to.")
    parser_copy.set_defaults(func=tessCopy)
    parser_clean = subparsers.add_parser('clean',
        help="clean vs2008 folder of build folders and .user files")
    parser_clean.set_defaults(func=tessClean)
    #kludge because argparse has no ability to set default subparser
    if (len(sys.argv) == 2):
        sys.argv.append("compare")
    args = parser.parse_args()
    #handle commands
    # 'copy' is the only sub-command that takes an extra argument.
    if args.func == tessCopy:
        args.func(args.tessDir, args.includeDir)
    else:
        args.func(args.tessDir)
if __name__ == '__main__' :
    main()
| Python |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2012 Zdenko Podobný
# Author: Zdenko Podobný
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Simple python demo script of tesseract-ocr 3.02 c-api
"""
import os
import sys
import ctypes
# Demo variables
lang = "eng"
filename = "../phototest.tif"
libpath = "/usr/local/lib64/"
libpath_w = "../vs2008/DLL_Release/"
TESSDATA_PREFIX = os.environ.get('TESSDATA_PREFIX')
if not TESSDATA_PREFIX:
    TESSDATA_PREFIX = "../"
# Pick the platform-specific library name; on Windows also extend PATH so
# dependent DLLs next to the library are found.
if sys.platform == "win32":
    libname = libpath_w + "libtesseract302.dll"
    libname_alt = "libtesseract302.dll"
    os.environ["PATH"] += os.pathsep + libpath_w
else:
    libname = libpath + "libtesseract.so.3.0.2"
    libname_alt = "libtesseract.so.3"
# Try the versioned library first, then the generic name.
try:
    tesseract = ctypes.cdll.LoadLibrary(libname)
except:
    try:
        tesseract = ctypes.cdll.LoadLibrary(libname_alt)
    except WindowsError, err:
        # NOTE(review): WindowsError is only defined on Windows; on other
        # platforms a failure here surfaces as NameError instead.  (This is
        # Python 2 syntax -- the script is not Python 3 compatible.)
        print("Trying to load '%s'..." % libname)
        print("Trying to load '%s'..." % libname_alt)
        print(err)
        exit(1)
tesseract.TessVersion.restype = ctypes.c_char_p
tesseract_version = tesseract.TessVersion()[:4]
# We need to check library version because libtesseract.so.3 is symlink
# and can point to other version than 3.02
if float(tesseract_version) < 3.02:
    print("Found tesseract-ocr library version %s." % tesseract_version)
    print("C-API is present only in version 3.02!")
    exit(2)
# Create an API handle, initialize it, and OCR the demo image.
api = tesseract.TessBaseAPICreate()
rc = tesseract.TessBaseAPIInit3(api, TESSDATA_PREFIX, lang);
if (rc):
    tesseract.TessBaseAPIDelete(api)
    print("Could not initialize tesseract.\n")
    exit(3)
text_out = tesseract.TessBaseAPIProcessPages(api, filename, None , 0);
result_text = ctypes.string_at(text_out)
print result_text
| Python |
'''
Created on Oct 16, 2012
Refactored on Jul 4, 2013
@author: Nils Amiet
'''
import time
import math
from contextlib import contextmanager
@contextmanager
def timer():
    '''Context manager that times the wrapped block and prints the elapsed
    time on normal exit.'''
    clock = Timer()
    clock.start()
    yield
    clock.stop()
    print(clock)
class Timer:
    '''Measures wall-clock execution time with millisecond resolution.'''

    MILLIS_PER_SECOND = 1000
    MILLIS_PER_MINUTE = MILLIS_PER_SECOND * 60
    MILLIS_PER_HOUR = MILLIS_PER_MINUTE * 60
    MILLIS_PER_DAY = MILLIS_PER_HOUR * 24

    def __init__(self):
        self.startMillis = 0
        self.endMillis = 0
        self.totalTimeMillis = 0

    def start(self):
        '''Record the starting timestamp (epoch milliseconds).'''
        self.startMillis = int(time.time() * self.MILLIS_PER_SECOND)

    def stop(self):
        '''Record the ending timestamp and compute the elapsed time.'''
        self.endMillis = int(time.time() * self.MILLIS_PER_SECOND)
        self.totalTimeMillis = self.endMillis - self.startMillis

    def __repr__(self):
        # Break the elapsed milliseconds down into calendar-style units.
        remaining = self.totalTimeMillis
        days, remaining = divmod(remaining, self.MILLIS_PER_DAY)
        hours, remaining = divmod(remaining, self.MILLIS_PER_HOUR)
        minutes, remaining = divmod(remaining, self.MILLIS_PER_MINUTE)
        seconds, millis = divmod(remaining, self.MILLIS_PER_SECOND)
        return ("%s days, %s hours, %s minutes, %s seconds, %s millis."
                % (days, hours, minutes, seconds, millis))
'''
Created on 11 juin 2013
@author: Nils Amiet
'''
from subprocess import Popen, PIPE
import os
class SentiStrength():
    '''Wrapper class for the SentiStrength (java) sentiment classifier.'''

    RUN_COMMAND = "java -jar"
    SENTISTRENGTH_PATH = os.path.join(os.path.dirname(__file__), "SentiStrengthCom.jar")
    DATA_PATH = os.path.join(os.path.dirname(__file__), "SentStrength_Data_Sept2011/")

    def __init__(self):
        pass

    def classifiy(self, text):
        '''Classify *text*; returns the per-line summed polarities.

        (Method name typo preserved for backward compatibility.)
        '''
        commandArgs = "%s %s sentidata %s cmd" % (self.RUN_COMMAND, self.SENTISTRENGTH_PATH, self.DATA_PATH)
        commandArgs = commandArgs.split(" ")
        process = Popen(commandArgs, stdin=PIPE, stdout=PIPE, stderr=PIPE)
        # SentiStrength treats '+' as a word separator.
        text = text.replace(" ", "+")
        classification, dummy = process.communicate(text.encode("utf-8"))
        # BUG FIX: communicate() returns bytes on Python 3, so decode before
        # splitting (on Python 2, str.decode yields unicode -- compatible
        # with both interpreters).
        polarities = classification.decode("utf-8").split("\n")
        polarities = [self.polarity(line.strip()) for line in polarities]
        polarities = [x for x in polarities if x is not None]
        return polarities

    def polarity(self, line):
        '''Parse one output line ("pos neg ...") into its summed polarity.

        Returns None for lines that are not space-separated integers.
        '''
        val = line.split(" ")
        try:
            val = [int(x) for x in val]
        except ValueError:
            # Narrowed from a bare except: only parse failures are expected.
            return None
        return sum(val)
from django.conf.urls import patterns, include, url
# Uncomment the next two lines to enable the admin:
#from django.contrib import admin
#admin.autodiscover()
# Route table for the ITInfluence app; all views live in ITInfluence.views.
urlpatterns = patterns('ITInfluence.views',
    # Examples:
    # url(r'^$', 'InfrarougeTwitterInfluence.views.home', name='home'),
    # url(r'^InfrarougeTwitterInfluence/', include('InfrarougeTwitterInfluence.foo.urls')),
    # Uncomment the admin/doc line below to enable admin documentation:
    # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
    # Uncomment the next line to enable the admin:
    # url(r'^admin/', include(admin.site.urls)),
    url(r'^$', 'index'),
    url(r'^twitter/hashtags/$', 'twitterBrowseHashtags'),
    url(r'^twitter/hashtag/(?P<hashtag>.+)/$', 'twitterHashtag'),
    url(r'^twitter/$', 'twitterStats'),
    url(r'^infrarouge/$', 'infrarougeStats'),
    url(r'^twitter/tweets/$', 'twitterBrowseTweets'),
    url(r'^twitter/collect/$', 'twitterShowCollectForm'),
    url(r'^twitter/stopStreaming/$', 'twitterStopStreaming'),
    url(r'^twitter/collect/toggleFollowersCollection/$', 'twitterToggleCollectingFollowers'),
    url(r'^infrarouge/forums/$', 'getInfrarougeForums'),
    url(r'^twitter/followers-ranking/$', 'twitterFollowersCountRanking'),
    # infrarouge images
    url(r'^infrarouge/images/ndi/$', 'getInfrarougeNDI'),
    url(r'^infrarouge/images/ndi-time/$', 'getInfrarougeNDITimeFigure'),
    url(r'^infrarouge/images/ndi-replies-count/$', 'getInfrarougeNDIReplyCountFigure'),
    url(r'^infrarouge/images/replies-graph/$', 'getInfrarougeRepliesGraph'),
    url(r'^infrarouge/images/user-discussion-graph/$', 'getInfrarougeUserDiscussionGraph'),
    # twitter images
    url(r'^twitter/images/ndi/$', 'getTwitterNDI'),
    url(r'^twitter/images/ndi/(?P<hashtag>.+)/$', 'getTwitterNDISingleHashtag'),
    url(r'^twitter/images/ndi-time/$', 'getTwitterNDITimeFigure'),
    url(r'^twitter/images/ndi-replies-count/$', 'getTwitterNDIReplyCountFigure'),
    url(r'^twitter/images/replies-graph/$', 'getTwitterRepliesGraph'),
    url(r'^twitter/images/followers-graph/$', 'getTwitterFollowersGraph'),
    # tools
    url(r'^tools/showimage/(?P<path>.+)$', 'showImage'),
)
| Python |
# Django settings for InfrarougeTwitterInfluence project.
import os
# Absolute path of the project root (parent of this settings module).
PROJECT_PATH = os.path.realpath(os.path.join(os.path.dirname(__file__), os.path.pardir))
DEBUG = False
TEMPLATE_DEBUG = DEBUG
ADMINS = (
    # ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
# Two databases: Twitter data lives in MySQL ("default"), the Infrarouge
# forum dump lives in SQLite ("infrarouge").  Queries are dispatched by the
# router listed in DATABASE_ROUTERS below.
DATABASES = {
    'infrarouge': { # SQLite database for Infrarouge data
        'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
        'NAME': os.path.join(PROJECT_PATH, '../InfrarougeGrabber/infrarouge.db'),
        # The following settings are not used with sqlite3:
        'USER': '',
        'PASSWORD': '',
        'HOST': '', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
        'PORT': '', # Set to empty string for default.
    },
    'default': { # MySQL database for Twitter data
        'ENGINE': 'django.db.backends.mysql',
        'NAME': 'SocialInfluence',
        'USER': 'infrarouge',
        'PASSWORD': 'infrarouge',
        'HOST': '127.0.0.1',
        'PORT': '3306',
    },
}
DATABASE_ROUTERS = [
    'ITInfluence.DatabaseRouters.TwitterRouter',
]
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
# NOTE(review): "*" accepts any Host header -- fine for a research prototype,
# not for a production deployment.
ALLOWED_HOSTS = ["*"]
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
    # Put strings here, like "/home/html/static" or "C:/www/django/static".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    # 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
# NOTE(review): a real deployment should load the secret key from the
# environment instead of committing it to version control.
SECRET_KEY = '(n1hz*0tl2p--qf@mz*7g6r%5z#lm*gx!-d9cu=ebu$ameht=^'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
    # 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    # Uncomment the next line for simple clickjacking protection:
    # 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'InfrarougeTwitterInfluence.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'InfrarougeTwitterInfluence.wsgi.application'
TEMPLATE_DIRS = (
    # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
    os.path.join(PROJECT_PATH, "templates"),
)
INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Uncomment the next line to enable the admin:
    # 'django.contrib.admin',
    # Uncomment the next line to enable admin documentation:
    # 'django.contrib.admindocs',
    'ITInfluence',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'filters': {
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse'
        }
    },
    'handlers': {
        'mail_admins': {
            'level': 'ERROR',
            'filters': ['require_debug_false'],
            'class': 'django.utils.log.AdminEmailHandler'
        }
    },
    'loggers': {
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True,
        },
    }
}
| Python |
"""
WSGI config for InfrarougeTwitterInfluence project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "InfrarougeTwitterInfluence.settings"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "InfrarougeTwitterInfluence.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| Python |
#!/usr/bin/env python
# Standard Django management entry point (runserver, syncdb, test, ...).
import os
import sys

if __name__ == "__main__":
    # Default to the project settings unless the caller already exported
    # DJANGO_SETTINGS_MODULE.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "InfrarougeTwitterInfluence.settings")
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)
| Python |
'''
Created on 8 juin 2013
@author: Nils Amiet
'''
import matplotlib
# Select the non-interactive Agg backend BEFORE pyplot is imported, so
# figures can be rendered server-side without a display.
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import networkx as nx
import math
import random
from django.http.response import HttpResponse
class GraphPlotter():
    '''Class used to plot networkx graphs with matplotlib.

    Figures are rendered off-screen (Agg backend) and returned as PNG
    payloads wrapped in a Django HttpResponse.
    '''

    def __init__(self, dpi=100, width=1600, height=900):
        # Target raster size of the generated PNG, in pixels.
        self.dpi = dpi
        self.width = width
        self.height = height

    def setFigureSize(self, figure):
        '''Resize the figure so it renders at roughly width x height pixels.'''
        w = int(self.width / self.dpi)
        h = int(self.height / self.dpi)
        figure.set_size_inches(w, h)

    def bipartiteNodePositions(self, graph):
        '''Compute layout with nice node positions for a bipartite graph.

        Nodes whose "bipartite" attribute equals 0 are scattered in the left
        band of the canvas, all other nodes in the right band.
        '''
        bipartiteAttributes = nx.get_node_attributes(graph, "bipartite")
        # BUG FIX: the original tested "is 0" / "is not 0" (identity instead
        # of equality), which is undefined for ints and a SyntaxWarning on
        # modern Python.  Compare values with == / != instead.
        partitionA = [node for node in graph.nodes() if bipartiteAttributes[node] == 0]  # bipartite=0
        partitionB = [node for node in graph.nodes() if bipartiteAttributes[node] != 0]  # bipartite=1
        pos = {}
        for node in partitionA:
            xCoord = random.uniform(0, 0.2)  # random position on the left side
            yCoord = random.uniform(0, 1)
            pos[node] = [xCoord, yCoord]
        for node in partitionB:
            xCoord = random.uniform(0.8, 1)  # random position on the right side
            yCoord = random.uniform(0, 1)
            pos[node] = [xCoord, yCoord]
        return pos

    def memoryPlot(self, graph, bipartite=False, pos=None, nodeSizes=None, nodeColor='r', nodeLabel="Nodes", nodeLabel2="Nodes 2", edgeLabel="Edges"):
        '''Plots the network using matplotlib and returns it as an
        image/png HttpResponse; nothing is written to disk.'''
        if pos is None:
            # pos = nx.spring_layout(graph)
            pos = nx.random_layout(graph)
        nodeSize = 25
        edgeWidth = 0.5
        if nodeSizes is not None:
            nodeSize = nodeSizes
        figure = plt.figure()
        rect = (0, 0, 1, 1)
        ax = figure.add_axes(rect)
        matplotlib.pyplot.axis("off")
        if bipartite:
            bipartiteAttributes = nx.get_node_attributes(graph, "bipartite")
            # BUG FIX: value comparison instead of identity ("is 0").
            users = [node for node in graph.nodes() if bipartiteAttributes[node] == 0]  # bipartite=0
            discussions = [node for node in graph.nodes() if bipartiteAttributes[node] != 0]  # bipartite=1
            nx.draw_networkx_nodes(graph, ax=ax, pos=pos, node_size=nodeSize, nodelist=users, label=nodeLabel)
            nx.draw_networkx_nodes(graph, ax=ax, pos=pos, node_size=nodeSize, nodelist=discussions, node_color='b', label=nodeLabel2)
            nx.draw_networkx_edges(graph, ax=ax, pos=pos, width=edgeWidth, label=edgeLabel)
            userCount = len(users)
            discussionCount = len(discussions)
            edgeCount = len(graph.edges())
            graphInfo = "%s users\n%s discussions\n%s edges\n" % (userCount, discussionCount, edgeCount)
            figure.text(0, 0, graphInfo)
        else:
            nx.draw_networkx_nodes(graph, ax=ax, pos=pos, node_size=nodeSize, nodelist=graph.nodes(), node_color=nodeColor, label=nodeLabel)
            nx.draw_networkx_edges(graph, ax=ax, pos=pos, width=edgeWidth, label=edgeLabel)
            nodeCount = len(graph.nodes())
            edgeCount = len(graph.edges())
            graphInfo = "%s nodes \n%s edges\n" % (nodeCount, edgeCount)
            figure.text(0, 0, graphInfo)
        try:
            matplotlib.pyplot.legend()
        except Exception:
            # Legend drawing fails when there are no labelled artists; the
            # plot itself is still usable, so just warn (was a bare except).
            print("Warning: drawing legend failed")
        response = HttpResponse(content_type="image/png")
        self.setFigureSize(figure)
        figure.savefig(response, format='png', dpi=self.dpi, bbox_inches='tight')
        return response
class TwoDimensionalValuesPlotter():
    '''Class used to plot 2D datasets with matplotlib.'''

    def __init__(self, dpi=100, width=1600, height=900):
        # Target raster size of the generated image, in pixels.
        self.dpi = dpi
        self.width = width
        self.height = height

    def setFigureSize(self, figure):
        '''Resize the figure so it renders at roughly width x height pixels.'''
        w = int(self.width / self.dpi)
        h = int(self.height / self.dpi)
        figure.set_size_inches(w, h)

    def plot(self, xValues, yValues, filename, xlabel, ylabel):
        '''Plot a single curve and save it to *filename* on disk.'''
        figure = plt.figure()
        subplot = figure.add_subplot(1, 1, 1)
        subplot.set_xlabel(xlabel)
        subplot.set_ylabel(ylabel)
        subplot.grid(True)
        subplot.plot(xValues, yValues)
        self.setFigureSize(figure)
        plt.savefig(filename, dpi=self.dpi)

    def memoryPlotMultipleDatasets(self, datasets, xlabel, ylabel):
        '''Plots multiple curves on the same chart.

        datasets: sequence of (title, (xValues, yValues)) tuples, one
        subplot per dataset, laid out on a near-square grid.
        Returns an image/png HttpResponse.
        '''
        figure = plt.figure()
        datasetCount = len(datasets)
        # BUG FIX: math.ceil returns a float on Python 3; subplot geometry
        # must be an int.
        width = int(math.ceil(math.sqrt(datasetCount)))
        for datasetId, dataset in enumerate(datasets):
            # BUG FIX: subplot indices are 1-based; passing the raw
            # enumerate() index (starting at 0) is rejected by matplotlib.
            subplot = figure.add_subplot(width, width, datasetId + 1)
            subplot.set_xlabel(xlabel)
            subplot.set_ylabel(ylabel)
            subplot.grid(True)
            title = dataset[0]
            xValues = dataset[1][0]
            yValues = dataset[1][1]
            subplot.set_title(title)
            subplot.plot(xValues, yValues)
            # plot title/axis/labels font size
            for item in ([subplot.title, subplot.xaxis.label, subplot.yaxis.label] + subplot.get_xticklabels() + subplot.get_yticklabels()):
                item.set_fontsize(8)
        figure.tight_layout()
        response = HttpResponse(content_type="image/png")
        self.setFigureSize(figure)
        figure.savefig(response, format='png')
        return response

    def memoryPlotMultipleDatasetsMultidimensional(self, datasetsList, xlabel, ylabel):
        '''Plots multiple subcharts on the same chart.

        datasetsList: one list of (title, (xValues, yValues)) tuples per
        series; series are zipped so each subplot overlays one curve from
        every list.  Returns an image/png HttpResponse.
        '''
        figure = plt.figure()
        # BUG FIX: zip() returns a lazy iterator on Python 3; materialize it
        # so len() and indexing work.
        datasets = list(zip(*datasetsList))
        # BUG FIX: the legend-handle buffer was hard-coded to two entries
        # ([1, 2]); size it to the actual number of series instead.
        legendData = [None] * len(datasetsList)
        datasetCount = len(datasets)
        width = int(math.ceil(math.sqrt(datasetCount)))  # near-square grid
        for datasetId, datasetTuple in enumerate(datasets):
            # BUG FIX: 1-based subplot index (was the 0-based loop index).
            subplot = figure.add_subplot(width, width, datasetId + 1)
            subplot.set_xlabel(xlabel)
            subplot.set_ylabel(ylabel)
            subplot.grid(True)
            title = datasetTuple[0][0]
            for num, dataset in enumerate(datasetTuple):
                xValues = dataset[1][0]
                yValues = dataset[1][1]
                subplot.set_title(title)
                legendData[num], = subplot.plot(xValues, yValues, label=num)
            # plot title/axis/labels font size
            for item in ([subplot.title, subplot.xaxis.label, subplot.yaxis.label] + subplot.get_xticklabels() + subplot.get_yticklabels()):
                item.set_fontsize(8)
        figure.tight_layout()
        figure.legend(legendData, ["equal time intervals", "equal reply count intervals"], loc="lower left", prop={"size": 6})
        response = HttpResponse(content_type="image/png")
        self.setFigureSize(figure)
        figure.savefig(response, format='png')
        return response
# coding: utf-8
'''
Created on 18 juin 2013
@author: Nils Amiet
'''
import nltk
def isEnglishTweet(text):
    '''
    Checks that the ratio of unknown words in the given text does not exceed a threshold.
    Words are checked against an English dictionary of 235k words provided by NLTK.

    Returns True when the text looks English, False otherwise -- including
    when no checkable words remain after filtering (URL/mention-only tweets).
    '''
    # Strip punctuation and Twitter noise before tokenizing.  Note "RT" is a
    # multi-character token, so a simple replace loop is used (str.translate
    # would only handle single characters).
    filterList = ["#", "RT", ".", ":", ",", ";", "'", "(", ")", "{", "}", "[", "]", "~", "\"", "?", "!"]
    for sign in filterList:
        text = text.replace(sign, "")
    # Drop URLs and @mentions -- they carry no language information.
    text = [word for word in text.split(" ") if not word.startswith("http") and not word.startswith("@")]
    # PERF FIX: the 235k-word set was rebuilt on every call; cache it on the
    # function after the first use.
    englishWords = getattr(isEnglishTweet, "_englishWords", None)
    if englishWords is None:
        englishWords = set(w.lower() for w in nltk.corpus.words.words())
        isEnglishTweet._englishWords = englishWords
    textWords = set(w.lower() for w in text)
    # BUG FIX: a tweet consisting solely of URLs/mentions left textWords
    # empty and crashed with ZeroDivisionError; such a text cannot be
    # confirmed as English, so report False.
    if not textWords:
        return False
    unknownWords = textWords - englishWords
    unknownFraction = len(unknownWords) / float(len(textWords))
    threshold = 0.5
    return unknownFraction <= threshold
from django.db import models
import django.db.models.options as options
# Register a custom Meta option so models can flag themselves with
# "isTwitterModel = True"; presumably consumed by the database router listed
# in settings.DATABASE_ROUTERS -- confirm.
options.DEFAULT_NAMES = options.DEFAULT_NAMES + ('isTwitterModel',)
# Create your models here.
'''Twitter models'''
class Tweet(models.Model):
    class Meta():
        # Custom flag registered at module import; marks this as a
        # Twitter-database model.
        isTwitterModel = True
    # Twitter ids exceed 32-bit range, hence 20-digit decimal fields.
    id = models.DecimalField(primary_key=True, max_digits=20, decimal_places=0)
    in_reply_to_status_id = models.DecimalField(max_digits=20, decimal_places=0)
    user = models.ForeignKey("User")
    text = models.TextField()
    created_at = models.DateTimeField()
    # Sentiment polarity, filled in asynchronously (polarity_ready flags it).
    polarity = models.FloatField()
    polarity_ready = models.BooleanField()
    # Space-separated list of hashtags (see hashtags module: split(" ")).
    hashtags = models.TextField()
class User(models.Model):
    class Meta():
        # Marks this as a Twitter-database model (custom Meta option).
        isTwitterModel = True
    id = models.DecimalField(primary_key=True, max_digits=20, decimal_places=0)
    screen_name = models.TextField()
    statuses_count = models.DecimalField(max_digits=20, decimal_places=0)
    friends_count = models.DecimalField(max_digits=20, decimal_places=0)
    followers_count = models.DecimalField(max_digits=20, decimal_places=0)
    lang = models.TextField()
    # Collection bookkeeping: set once the follower list / full profile has
    # been fetched from the Twitter API (see the followers collector).
    followers_ready = models.BooleanField()
    user_ready = models.BooleanField()
class Friendship(models.Model):
    '''Directed follow relation: "user" follows "followed_user".'''
    class Meta():
        # Marks this as a Twitter-database model (custom Meta option).
        isTwitterModel = True
    # user follows followed_user
    user = models.ForeignKey("User", related_name="twitter_user_follow_source")
    followed_user = models.ForeignKey("User", related_name="twitter_user_follow_destination")
'''Infrarouge models'''
class InfrarougeUser(models.Model):
    class Meta():
        # Maps the pre-existing "user" table of the Infrarouge SQLite dump.
        db_table = "user"
    id = models.IntegerField(primary_key=True)
    name = models.TextField()
class InfrarougeForum(models.Model):
    class Meta():
        # Maps the pre-existing "forum" table of the Infrarouge SQLite dump.
        db_table = "forum"
    id = models.IntegerField(primary_key=True)
    title = models.TextField()
    description = models.TextField()
class InfrarougeForumThread(models.Model):
    class Meta():
        # Maps the pre-existing "forumthread" table of the SQLite dump.
        db_table = "forumthread"
    id = models.IntegerField(primary_key=True)
    title = models.TextField()
    description = models.TextField()
    url = models.TextField()
    fkparentforum = models.ForeignKey("InfrarougeForum")
    fkauthor = models.ForeignKey("InfrarougeUser")
class InfrarougeThreadMessage(models.Model):
    class Meta():
        # Maps the pre-existing "threadmessage" table of the SQLite dump.
        db_table = "threadmessage"
    id = models.IntegerField(primary_key=True)
    message = models.TextField()
    fkforumthread = models.ForeignKey("InfrarougeForumThread")
    fkauthor = models.ForeignKey("InfrarougeUser")
    timestamp = models.DateTimeField()
    # Sentiment polarity of the message.
    polarity = models.FloatField()
# KNOWN LIMITATION: this mapping does not work because Django doesn't support
# tables without primary keys or tables with a composed (multi-column)
# primary key; the underlying "reply" table has no single "id" column.
class InfrarougeReply(models.Model):
    class Meta():
        db_table = "reply"
    id = models.IntegerField(primary_key=True)
    fkfrom = models.ForeignKey("InfrarougeUser", related_name="infrarougeuser_infrarougereply_from")
    fkto = models.ForeignKey("InfrarougeUser", related_name="infrarougeuser_infrarougereply_to")
    fkthreadmessage = models.ForeignKey("InfrarougeThreadMessage")
# coding: utf-8
'''
Created on 4 jul 2013
@author: Nils Amiet
'''
import unittest
import os, sys
''' Required for running the script from anywhere outside eclipse'''
# Anchor the working directory at this file and expose the parent package so
# the project imports below resolve regardless of launch location.
os.chdir(os.path.dirname(os.path.realpath(__file__)))
sys.path.append('..')
from SentiStrength.sentistrength import SentiStrength
from Tools.Timer import Timer
from ITInfluence.language import isEnglishTweet
from ITInfluence.polarity import PolarityCounter
class SentiStrengthTest(unittest.TestCase):
    '''Tests for SentiStrength'''

    def testPerformance(self):
        # SentiStrength advertises ~16k annotated texts per second; build a
        # 16k-line payload and verify one classification pass stays within
        # that claim (plus margin).
        line = "Even though it looked good at first, this movie is utterly bad."
        text = line
        textCount = 16000 # claims to annotate 16k texts in a second
        # NOTE(review): += string concatenation in a loop is quadratic, but
        # acceptable at this size.
        for unused in range(textCount):
            text += "\n" + line
        timer = Timer()
        timer.start()
        s = SentiStrength()
        # "classifiy" (sic) matches the SentiStrength wrapper's method name.
        unused = s.classifiy(text)
        timer.stop()
        # Allow some margin because 16k is not many, it works best with larger amounts.
        # For instance with 160k the relative margin is much smaller.
        margin = 1000
        expectedTime = 1000 + margin
        experimentalValue = timer.totalTimeMillis <= expectedTime
        expectedValue = True
        errorMessage = "Took %s millis." % timer.totalTimeMillis
        self.assertEqual(experimentalValue, expectedValue, errorMessage)
class LanguageTest(unittest.TestCase):
    '''Tests for the language filter module'''

    def testTexts(self):
        # (expected isEnglishTweet() result, text) pairs.
        texts = [
            (False, "Je me promène le long de la côte sud-ouest de l'afrique"),
            (True, "I live in New York because I think it's the most beautiful city in the world"),
            (False, "Je t'aime"),
            (True, "I love you"),
            # English tweets
            (True, "RT @bidon Beau gave Luke a hickey.. Really shouldn't be jealous..@BrooksBeau @luke_brooks WANT ONE:( http://t.co/0uBKDDZSIB"),
            (True, "#GGM I love socks too ,me Too. @AHuston_FanPage @NicolasCageFake @elderkeeper @AlbusDumby7 @mondesdegwenn @J0HNNYDepp http://t.co/OsTX4MdxvY"),
            (True, "RT @mashable: Don't Bother Following the Fiat Abarth 500 (@Abarth500_DE) on Twitter — It's Too Damn Fast http://t.co/5XopBSt4IS #innovative"),
            # Foreign tweets
            (False, "Amacın 2 tane ağaçtan çok daha derin olduğu ortada ama TR bölünmez! @yigitbulutt http://t.co/7t3vHf7VDd"),
            (False, "RT @MlaniX1: Voila la photo pour vous remercier grâce a vous je suis a plus de 500 followers merci beaucoup <3 http://t.co/Uv5YXmPuvM"),
            (False, "RT @ta9ton: 不覚にもイオン閉店に間に合わず隔離された僕は… http://t.co/zRnOnpTFVN"),
        ]
        # We'd like to have the full debug in case more than one text fails:
        # run every case, print each failure, and re-raise the last one at
        # the end so the test still fails.
        exception = None
        for expectedValue, text in texts:
            experimentalValue = isEnglishTweet(text)
            errorMessage = "False classification of text %s" % text
            try:
                self.assertEqual(expectedValue, experimentalValue, errorMessage)
            except Exception as e:
                print(e)
                exception = e
        if exception is not None:
            raise exception
class PolarityTests(unittest.TestCase):
    '''Tests for polarity counting'''

    def setUp(self):
        # A tiny reply network between users 2, 4 and 5.
        self.replies = [
            {"from": 4, "to": 5, "polarity": 0.8},
            {"from": 5, "to": 4, "polarity": 0.2},
            {"from": 4, "to": 2, "polarity": -0.3},
            {"from": 2, "to": 5, "polarity": 0.1},
        ]

    def testPolarity(self):
        ranking = PolarityCounter(self.replies).polarityDifferenceRanking()
        # node 2 should be first because it has the greatest polarity difference
        expectedValue = 2
        self.assertEqual(
            ranking[0][0],
            expectedValue,
            "Incorrect ranking. User %s should be first" % expectedValue,
        )

    def testNDI(self):
        experimentalValue = PolarityCounter(self.replies).NDI
        # NDI = sum of squares of differences
        # = (0.5)^2 + (0.5)^2 + (1.0)^2
        # = 0.25 + 0.25 + 1
        # = 1.5
        expectedValue = 1.5
        self.assertEqual(
            experimentalValue,
            expectedValue,
            "Wrong NDI value. Expected: %s, experimental: %s" % (expectedValue, experimentalValue),
        )
if __name__ == "__main__":
unittest.main() | Python |
'''
Created on 13 juin 2013
@author: Nils Amiet
'''
from ITInfluence.models import Tweet
def getAllHashtags():
    '''Return [(hashtag, occurrence_count), ...] over every tweet that has
    hashtags, in first-seen order.

    Tweets store their hashtags as a space-separated string.
    '''
    from collections import Counter
    tweets = Tweet.objects.all().exclude(hashtags="")
    # Counter replaces the original per-tag try/except KeyError counting;
    # it preserves first-insertion order like a plain dict.
    counts = Counter()
    for tweet in tweets:
        counts.update(tweet.hashtags.split(" "))
    return list(counts.items())
def getSimilarHashtags(hashtag):
    '''Return [(tag, co-occurrence_count), ...] for hashtags appearing in
    the same tweets as *hashtag* (one occurrence of *hashtag* itself is
    removed per tweet, matching the original behaviour).'''
    from collections import Counter
    similarHashtags = Counter()
    tweets = Tweet.objects.all().exclude(hashtags="").filter(hashtags__contains=hashtag)
    for tweet in tweets:
        currentTweetHashtags = tweet.hashtags.split(" ")
        try:
            currentTweetHashtags.remove(hashtag)
        except ValueError:
            # hashtags__contains matched a substring of another tag; nothing
            # to remove (was a bare except).
            pass
        similarHashtags.update(currentTweetHashtags)
    return list(similarHashtags.items())
'''
Created on 10 juin 2013
@author: Nils Amiet
'''
import threading
from ITInfluence.models import User, Friendship
from ITInfluence.twitter import TwitterREST
# Module-level singleton; its isCollectingFollowers attribute doubles as the
# start/stop switch for the background collection thread below.
rest = TwitterREST()
def collectRequest(query):
    '''Estimate (days, hours, minutes) needed to perform the API requests
    counted by *query*.

    The raw SQL must select a "req" column holding the number of follower
    requests required; %s is bound to the page size (5000 ids/request).
    '''
    # Twitter rate limit: roughly one followers request per minute.
    userRequestPerMinute = 1
    followersPerRequest = 5000
    val = User.objects.raw(query, [followersPerRequest])
    req = val[0].req
    try:
        # NOTE(review): under Python 2 these are integer divisions, under
        # Python 3 they produce floats -- confirm which is intended.
        minutes = req / userRequestPerMinute
        hours = minutes / 60
        days = hours / 24
        return days, hours, minutes
    except:
        # Best effort: an empty aggregate (req is None) lands here.
        return 0, 0, 0
def getTotalTimeToCollectFollowers():
    '''Returns an estimation of the time required to collect all the followers for the users in the database'''
    # One request per 5000 followers (the %s placeholder), minimum one
    # request per user.
    query = """
    SELECT id, sum(IF(followers_count, ceiling(followers_count/%s), 1)) as req
    FROM ITInfluence_user
    WHERE followers_count > 0
    """
    return collectRequest(query)
def getRemainingTimeToCollectFollowers():
    '''Returns an estimation of the time required to collect the followers for the users that we didnt get the followers yet.'''
    # Same estimate as above, restricted to users not yet processed
    # (followers_ready = 0).
    query = """
    SELECT id, sum(IF(followers_count, ceiling(followers_count/%s), 1)) as req
    FROM ITInfluence_user
    WHERE followers_ready=0
    """
    return collectRequest(query)
def collectFollowersData():
    '''Collect the followers for all users in the database that have not their followers collected yet.

    Runs until finished or until rest.isCollectingFollowers is cleared (via
    toggleFollowersCollection); the flag is re-checked between users and
    between individual follower inserts so a stop request takes effect fast.
    '''
    print("Started collecting followers.")
    users = User.objects.all().filter(followers_ready=False).filter(followers_count__gte=1)
    for user in users:
        # stop collecting if asked
        if not rest.isCollectingFollowers:
            return
        followers = rest.getFollowers(int(user.id), getAll=True) # rate limited 15 every 15 minutes
        if followers is None:
            print("Stopped collecting followers.")
            return # stop collecting
        print("Collected %s followers" % len(followers))
        print("Inserting collected followers into database...")
        for follower in followers:
            # stop collecting if asked
            if not rest.isCollectingFollowers:
                print("Stopped collecting followers.")
                return
            source = user
            try:
                destination = User.objects.get(id=follower)
            except:
                # Unknown follower id: create a minimal placeholder profile.
                # destination = createUser(follower)
                destination = User(id=follower)
                # Presumably flagged ready so the collector never tries to
                # expand the placeholder's own followers -- confirm.
                destination.followers_ready = True
                destination.user_ready = False
                destination.statuses_count = 0
                destination.friends_count = 0
                destination.followers_count = 0
                destination.save()
            # Edge direction: destination (the follower) follows source.
            friendship = Friendship(user=destination, followed_user=source)
            friendship.save()
        user.followers_ready = True
        user.save()
        print("...done inserting followers for this user!")
    print("Stopped collecting followers.")
    rest.isCollectingFollowers = False
def runFollowersCollection():
    '''Run collectFollowersData() on a background daemon thread.'''
    worker = threading.Thread(target=collectFollowersData)
    worker.daemon = True  # don't block interpreter shutdown
    worker.start()
def toggleFollowersCollection():
    '''Flip the global collection switch; (re)start the worker when turning on.'''
    nowCollecting = not rest.isCollectingFollowers
    rest.isCollectingFollowers = nowCollecting
    if nowCollecting:
        runFollowersCollection()
def createUser(userId):
    '''Fetch a full user profile from the Twitter REST API
    (rate limited: 180 requests every 15 minutes).'''
    return rest.getUser(userId)
| Python |
'''
Created on 18 juin 2013
@author: Nils Amiet
'''
from ITInfluence.models import Tweet, User, Friendship
import networkx as nx
import math
class TwitterFollowersGraphBuilder():
    '''
    Builds the followers graph: one node per user, one directed edge from
    each follower to the account it follows.  Also records per-node drawing
    sizes (scaled by follower count) and out-degrees.
    '''

    def __init__(self):
        self.graph = nx.DiGraph()
        self.nodeSizes = []
        self.nodeFriendCounts = []
        print("Building graph...")
        print("Running user query...")
        maxFollowersCount = 500
        minFollowersCount = 5000
        ### You can uncomment one of the following lines with "users = ..." at your choice
        # All users that have at most 500 followers
        users = User.objects.all().filter(followers_count__lte=maxFollowersCount).exclude(followers_count=0)
        # All users that have at least 5000 followers
        # users = User.objects.all().filter(followers_count__gte=minFollowersCount).exclude(followers_count=0)
        # All users than have at least one follower
        # users = User.objects.all().exclude(followers_count=0)
        # Don't run this one if you have less than 16GB memory on large networks (>10M)
        # users = User.objects.all()
        print("done!")
        self.buildGraph(users)
        print("...done building graph!")
        print("Processed %s users" % len(users))

    def buildGraph(self, users):
        '''First pass: add one node per user and one edge per friendship.
        Second pass: compute per-node drawing weight and out-degree.'''
        print("Counting users...")
        userCount = len(users)
        print("done!")
        counter = 0
        for user in users:
            percentage = 100 * (float(counter) / userCount)
            print("Processing user %s/%s (%s" % (counter, userCount, percentage) + "%)")
            userId = int(user.id)
            self.graph.add_node(userId)
            # (BUG FIX: a dead "userWeight = self.getWeight(...)" assignment
            # was removed here -- the weight is only needed in the second
            # pass below.)
            friendships = Friendship.objects.all().filter(user__id=int(user.id))
            friendshipCount = friendships.count()
            friendshipCounter = 0
            for friendship in friendships:
                percentage = 100 * (float(friendshipCounter) / friendshipCount)
                print("Processing friendship %s/%s (%s" % (friendshipCounter, friendshipCount, percentage) + "%)" + " - User: %s" % percentage + "%")
                self.graph.add_edge(userId, int(friendship.followed_user.id))
                friendshipCounter += 1
            counter += 1
        nodeCount = len(self.graph.nodes())
        counter = 1
        for node in self.graph.nodes():
            percentage = 100 * (float(counter) / nodeCount)
            print("Processing node %s/%s (%s" % (counter, nodeCount, percentage) + "%)")
            # NOTE(review): one query per node (N+1 pattern); acceptable for
            # small graphs only.
            user = User.objects.get(id=node)
            userWeight = self.getWeight(int(user.followers_count))
            self.nodeSizes += [userWeight]
            userFriendsCount = self.graph.out_degree(node)
            self.nodeFriendCounts += [userFriendsCount]
            counter += 1

    def getWeight(self, followers):
        '''Node drawing size: base size plus a term that grows slowly with
        the follower count (log10) so hubs don't dominate the canvas.'''
        nodeSize = 25
        logBase = 10
        return nodeSize + 4 ** math.log(followers, logBase)
class TwitterGraphBuilder():
    '''
    Builds the replies graph for collected Twitter data: a directed edge
    from the author of a tweet to each user it reaches (the replied-to user,
    or all followers for non-reply tweets).
    '''

    def __init__(self, hashtagOfInterest="summer"):
        '''
        Constructor
        '''
        self.graph = nx.DiGraph()
        print("Building graph...")
        print("Running tweet query...")
        ### Uncomment one of the following lines "tweets = ..."
        # Use a not so frequent hashtag, fast computation
        tweets = Tweet.objects.all().filter(hashtags__icontains=hashtagOfInterest)
        # Don't run this if you have less than 32GB memory for large networks (>10M).
        # tweets = Tweet.objects.all()
        self.buildGraph(tweets)
        print("...done building graph!")

    def buildGraph(self, tweets):
        '''Add reply edges for every tweet in the queryset.'''
        tweetCount = len(tweets)
        counter = 0
        for tweet in tweets:
            percentage = 100 * (float(counter) / tweetCount)
            print("Processing tweet %s/%s (%s" % (counter, tweetCount, percentage) + "%)")
            author = tweet.user
            replyTo = int(tweet.in_reply_to_status_id)
            # BUG FIX: was "replyTo is 0" -- identity instead of equality.
            if replyTo == 0:
                # it's a reply to all of the author's followers
                followers = self.getFollowers(author)
                followersCount = len(followers)
                followerCounter = 0
                for follower in followers:
                    followerPercentage = 100 * (float(followerCounter) / followersCount)
                    print("Processing follower %s/%s (%s" % (followerCounter, followersCount, followerPercentage) + "%)" + " - Tweet: %s" % percentage + "%")
                    self.addReply(author, follower.user)
                    followerCounter += 1
            else:
                try:
                    # BUG FIX: the positional call User.objects.get(replyTo)
                    # always raised TypeError, so every direct reply was
                    # silently swallowed by the except below.
                    # NOTE(review): in_reply_to_status_id is a *status* id;
                    # looking it up as a user id may be a deeper modelling
                    # issue -- confirm against the data collection code.
                    toUser = User.objects.get(id=replyTo)
                    self.addReply(author, toUser)
                except Exception:
                    # print("Warning: in_reply_to user doesn't exist")
                    pass
            counter += 1

    def getFollowers(self, user):
        '''Friendship rows whose followed_user is *user* (i.e. the followers).'''
        friendships = Friendship.objects.all().filter(followed_user__id=int(user.id))
        return friendships

    def addReply(self, fromUser, toUser):
        '''Record one reply as a directed edge fromUser -> toUser.'''
        self.graph.add_edge(int(fromUser.id), int(toUser.id))
# coding: utf-8
'''
Created on 23 mai 2013
@author: Nils Amiet
'''
class PolarityCounter:
    '''Per-user reply polarity statistics and the Network Disagreement Index.

    Takes a list of reply dicts with keys "from", "to" and "polarity" and
    computes, for every user: counts of positive/negative messages sent and
    received, the absolute difference between the average received and sent
    polarity (AVERAGE), and finally the NDI summed over the undirected
    reply edges.
    '''

    # Keys of the per-user statistics dict stored in self.polarityCounts.
    RECEIVED = "recv"
    SENT = "sent"
    AVERAGE = "avg"

    def __init__(self, replies):
        '''
        Replies: a list of replies with the attributes "from", "to" and "polarity"
        '''
        self.replies = replies
        # BUG FIX: polarityCounts used to be a *class* attribute, so every
        # instance shared one dict and counts accumulated across
        # instantiations.  All mutable state is now per-instance.
        self.polarityCounts = {}
        self.edges = []
        self.NDI = -1  # network disagreement index, set by computeNDI()
        self.countPolarities()
        self.computeAveragePolarities()
        self.computeNDI()

    def _emptyStats(self):
        # Fresh statistics record for a user seen for the first time.
        return {self.RECEIVED: {}, self.SENT: {}, self.AVERAGE: None}

    def countPolarities(self):
        '''Tally, per user, how many positive (1) / negative (0) messages
        were sent and received.'''
        for reply in self.replies:
            fromUser = reply["from"]
            toUser = reply["to"]
            polarity = reply["polarity"]
            # negative polarity becomes 0 and positive becomes 1
            polarity = 0 if polarity <= 0 else 1
            fromUserCounts = self.polarityCounts.setdefault(fromUser, self._emptyStats())[self.SENT]
            toUserCounts = self.polarityCounts.setdefault(toUser, self._emptyStats())[self.RECEIVED]
            fromUserCounts[polarity] = fromUserCounts.get(polarity, 0) + 1
            toUserCounts[polarity] = toUserCounts.get(polarity, 0) + 1

    def computeAveragePolarities(self):
        '''For every user that both sent and received at least one message,
        store abs(avg_received - avg_sent) under AVERAGE; otherwise leave
        it None.'''
        for user, userCounts in self.polarityCounts.items():
            receivedPositive = userCounts[self.RECEIVED].get(1, 0)
            receivedNegative = userCounts[self.RECEIVED].get(0, 0)
            sentPositive = userCounts[self.SENT].get(1, 0)
            sentNegative = userCounts[self.SENT].get(0, 0)
            receivedTotal = receivedPositive + receivedNegative
            sentTotal = sentPositive + sentNegative
            # BUG FIX: the original silently reused "recv"/"sent" leaked
            # from the *previous* loop iteration whenever a user had never
            # received (or never sent) a message, yielding wrong averages.
            # Guard explicitly instead of relying on stale locals.
            recv = (receivedPositive - receivedNegative) / float(receivedTotal) if receivedTotal else None
            sent = (sentPositive - sentNegative) / float(sentTotal) if sentTotal else None
            if recv is not None and sent is not None:
                userCounts[self.AVERAGE] = abs(recv - sent)

    def computeNDI(self):
        '''NDI = sum over undirected reply edges of
        (opinion_from - opinion_to)^2, where opinion = AVERAGE / 2
        (0 for users without an AVERAGE).'''
        self.computeEdges()
        sumNDI = 0
        for edge in self.edges:
            weight = 1  # all edges currently carry the same weight
            firstUserOpinion = self._opinion(edge["from"])
            secondUserOpinion = self._opinion(edge["to"])
            sumNDI += weight * ((firstUserOpinion - secondUserOpinion) ** 2)
        self.NDI = sumNDI

    def _opinion(self, user):
        # Half the user's polarity difference, or 0 when unknown.
        stats = self.polarityCounts.get(user)
        if stats is None or stats[self.AVERAGE] is None:
            return 0
        return stats[self.AVERAGE] / 2.0

    def computeEdges(self):
        '''Deduplicate replies into undirected edges (A->B equals B->A).'''
        seen = set()
        for reply in self.replies:
            key = frozenset((reply["from"], reply["to"]))
            # O(1) membership test; the original scanned self.edges for
            # every reply (quadratic).
            if key not in seen:
                seen.add(key)
                self.edges += [reply]

    def contains(self, reply, edges):
        '''True when an equivalent (undirected) reply is already in *edges*.'''
        for edge in edges:
            if self.isSameMessage(reply, edge):
                return True
        return False

    def isSameMessage(self, reply, reply2):
        '''Two replies denote the same edge when they connect the same pair
        of users, in either direction.'''
        return reply["from"] == reply2["from"] and reply["to"] == reply2["to"] or reply["from"] == reply2["to"] and reply["to"] == reply2["from"]

    def polarityDifferenceRanking(self):
        '''Users sorted by descending polarity difference; users without an
        AVERAGE (never sent or never received) are excluded.'''
        users = {user: userCounts[self.AVERAGE] for user, userCounts in self.polarityCounts.items() if userCounts[self.AVERAGE] is not None}
        ranking = sorted(users.items(), key=lambda x: x[1], reverse=True)
        return ranking
'''
Created on 14 juin 2013
@author: Nils Amiet
'''
# coding: utf-8
import networkx as nx
import sqlite3
class InfrarougeGraphBuilder():
    '''Builds two graphs from the Infrarouge SQLite dump:

    graph1: directed user->user reply graph (edge weight = reply count).
    graph2: bipartite user/discussion participation graph
            (bipartite=0 for users, bipartite=1 for discussions).
    '''

    def __init__(self, databasePath):
        self.infrarougeDatabasePath = databasePath
        # BUG FIX: these four containers were *class* attributes, so every
        # instance shared (and kept accumulating into) the same dicts and
        # graphs.  They are now per-instance state.
        self.userRepliesCounts = {}
        self.graph1 = nx.DiGraph()
        self.userParticipations = []
        self.graph2 = nx.Graph()
        self.countUserReplies()
        self.createRepliesGraph()
        self.buildUserParticipations()
        self.createParticipationGraph()

    def countUserReplies(self):
        '''Count replies per (from_user, to_user) pair from the reply table.'''
        with sqlite3.connect(self.infrarougeDatabasePath) as connection:
            cursor = connection.cursor()
            query = "SELECT * FROM reply"
            cursor.execute(query)
            for reply in cursor:
                self.countReply(reply)
            cursor.close()

    def countReply(self, reply):
        '''Increment the counter for one (from, to) reply row.'''
        fromUser = reply[0]
        toUser = reply[1]
        fromTo = (fromUser, toUser)
        try:
            self.userRepliesCounts[fromTo] += 1
        except KeyError:
            self.userRepliesCounts[fromTo] = 1

    def createRepliesGraph(self):
        '''Turn the (from, to) -> count map into weighted directed edges.'''
        for fromTo, w in self.userRepliesCounts.items():
            try:
                self.graph1.add_edge(fromTo[0], fromTo[1], weight=w)
            except:
                print("Warning: adding edge failed")

    def buildUserParticipations(self):
        '''Collect (author, thread) pairs from the threadmessage table.'''
        with sqlite3.connect(self.infrarougeDatabasePath) as connection:
            cursor = connection.cursor()
            query = "SELECT * FROM threadmessage"
            cursor.execute(query)
            for threadMessage in cursor:
                forumThread = threadMessage[2]
                user = threadMessage[3]
                userThreadTuple = (user, forumThread)
                self.userParticipations += [userThreadTuple]
            cursor.close()

    def createParticipationGraph(self):
        '''Build the bipartite participation graph: users on one side,
        discussions on the other, one edge per message.'''
        users = [x[0] for x in self.userParticipations]
        discussions = [x[1] for x in self.userParticipations]
        self.graph2.add_nodes_from(users, bipartite=0)
        self.graph2.add_nodes_from(discussions, bipartite=1)
        self.graph2.add_edges_from(self.userParticipations)
| Python |
'''
Created on 11 juin 2013
@author: Nils Amiet
'''
from ITInfluence.polarity import PolarityCounter
from ITInfluence.models import Tweet, User, Friendship
from ITInfluence.hashtags import getAllHashtags
from InfrarougeTwitterInfluence import settings
import sqlite3
import math
infrarougeDatabasePath = settings.DATABASES["infrarouge"]["NAME"]
def getUsers():
    """Return every row of the user table as a list of tuples."""
    with sqlite3.connect(infrarougeDatabasePath) as connection:
        cursor = connection.cursor()
        cursor.execute("SELECT * FROM user")
        rows = cursor.fetchall()
        cursor.close()
    return rows
def getForums():
    """Return all forums as a list of dicts with keys id, title, description.

    Assumes the forum table's first three columns are id, title and
    description (matches the indexing used elsewhere in this module —
    TODO confirm against the schema).
    """
    forums = []
    with sqlite3.connect(infrarougeDatabasePath) as connection:
        cursor = connection.cursor()
        query = """
            SELECT *
            FROM forum
            """
        cursor.execute(query)
        for forum in cursor:
            builtForum = {
                "id": forum[0],
                "title": forum[1],
                "description": forum[2],
            }
            forums += [builtForum]
        cursor.close()  # was leaked before; close like the sibling helpers do
    return forums
def polarityReplies(forum=None):
    '''Return a list of replies containing the source, destination, polarity,
    timestamp and forum id.

    If forum is given, only replies belonging to that forum id are returned.
    '''
    replies = []
    with sqlite3.connect(infrarougeDatabasePath) as connection:
        cursor = connection.cursor()
        query = """
            SELECT r.fkfrom, r.fkto, m.polarity, m.timestamp, f.id
            FROM reply as r
            INNER JOIN threadmessage as m
            ON r.fkthreadmessage=m.id
            INNER JOIN forumthread as t
            ON m.fkforumthread=t.id
            INNER JOIN forum as f
            ON t.fkparentforum=f.id
            """
        params = []
        if forum is not None:
            # parameterized instead of string concatenation (avoids SQL
            # injection and type-coercion surprises)
            query += "WHERE f.id=?"
            params.append(forum)
        cursor.execute(query, params)
        for reply in cursor:
            builtReply = {
                "from": reply[0],
                "to": reply[1],
                "polarity": reply[2],
                "timestamp": reply[3],
                "forum": reply[4]
            }
            replies += [builtReply]
        cursor.close()
    return replies
def polarizationForAllForums(equalTime=True, equalRepliesCount=True):
    """Compute NDI variation for every forum.

    Returns two lists of (title, result) tuples: one computed over
    equal-duration intervals, one over intervals with an equal reply count.
    """
    resultsEqualTime = []
    resultsEqualRepliesCount = []
    sliceCount = 20
    for forum in getForums():
        forumID = forum["id"]
        title = "Forum " + str(forumID)
        timeSortedReplies = sorted(polarityReplies(forum=forumID),
                                   key=lambda reply: reply["timestamp"])
        if equalTime:
            # intervals of equal duration
            result = computePolarizationOverTime(timeSortedReplies, forumID, sliceCount)
            resultsEqualTime.append((title, result))
        if equalRepliesCount:
            # intervals holding an equal number of replies
            result = computePolarizationOverTimeSamePostCount(timeSortedReplies, forumID, sliceCount)
            resultsEqualRepliesCount.append((title, result))
    return resultsEqualTime, resultsEqualRepliesCount
def computePolarizationOverTimeSamePostCount(replies, forumID, sliceCount):
    """Compute NDI over consecutive slices that each hold (roughly) the same
    number of replies.

    Returns (times, ndis): the slice indices and the NDI of each slice.
    """
    repliesCount = len(replies)
    # Replies per slice.  The previous code compared a float to an int with
    # "is not" — an identity test that is always true — and therefore always
    # inflated the slice size by one; math.ceil alone already rounds up.
    deltaReplies = int(math.ceil(repliesCount / float(sliceCount)))
    ndiVariation = []
    replyOffset = 0
    while replyOffset < repliesCount:
        # NOTE: the +1 makes adjacent slices overlap by one reply; kept as-is.
        upperOffset = replyOffset + deltaReplies + 1
        r = replies[replyOffset:upperOffset]
        polarityCounter = PolarityCounter(r)
        edgesCount = len(polarityCounter.edges)
        ndiVariation.append((int(replyOffset), polarityCounter.NDI, edgesCount))
        replyOffset += deltaReplies
    # plot against consecutive slice indices rather than raw offsets
    times = list(range(len(ndiVariation)))
    ndis = [entry[1] for entry in ndiVariation]
    return times, ndis
def computePolarizationOverTime(replies, forumID, sliceCount):
    """Compute NDI over sliceCount intervals of equal duration.

    Returns (times, ndis): consecutive slice indices and the NDI of each
    interval.  Empty input yields two empty lists.
    """
    if not replies:
        return [], []  # nothing to slice (previously raised IndexError)
    first = replies[0]["timestamp"]
    last = replies[-1]["timestamp"]
    interval = last - first
    deltaTimeMillis = interval / sliceCount
    if deltaTimeMillis <= 0:
        # All replies share one timestamp: the old loop never advanced the
        # threshold and spun forever.  Treat the whole list as one slice.
        polarityCounter = PolarityCounter(replies)
        return [0], [polarityCounter.NDI]
    ndiVariation = []
    timeThreshold = first + deltaTimeMillis
    while timeThreshold <= last:
        lowerBound = timeThreshold - deltaTimeMillis
        upperBound = timeThreshold
        r = repliesInTimeInterval(replies, lowerBound, upperBound)
        polarityCounter = PolarityCounter(r)
        ndiVariation.append((int(timeThreshold), polarityCounter.NDI))
        timeThreshold += deltaTimeMillis
    # plot against consecutive slice indices rather than raw thresholds
    times = list(range(len(ndiVariation)))
    ndis = [entry[1] for entry in ndiVariation]
    return times, ndis
def repliesInTimeInterval(replies, lowerBound, upperBound):
    """Return the replies whose timestamp lies in [lowerBound, upperBound]."""
    selected = []
    for reply in replies:
        if lowerBound <= reply["timestamp"] <= upperBound:
            selected.append(reply)
    return selected
'''Twitter'''
def getNDIForMostFrequentHashtags(equalTime=True, equalRepliesCount=True):
    """Compute NDI variations for the most frequent hashtags.

    Returns two lists of (label, values) tuples, one per interval scheme.
    """
    frequentTags = mostFrequentHashtags()
    print("Most frequent hashtags:")
    print(frequentTags)
    resultsEqualTimeIntervals = []
    resultsEqualRepliesCountIntervals = []
    sliceCount = 20
    # Adjust this value as you wish
    # Greater value = More accurate results but more computation time
    maxFollowersCount = 50
    for tag in frequentTags:
        eqTime, eqReplyCount = getNDIForHashtag(tag,
                                                sliceCount=sliceCount,
                                                maxFollowersCount=maxFollowersCount,
                                                equalTime=equalTime,
                                                equalRepliesCount=equalRepliesCount)
        resultsEqualTimeIntervals.extend(eqTime)
        resultsEqualRepliesCountIntervals.extend(eqReplyCount)
    return resultsEqualTimeIntervals, resultsEqualRepliesCountIntervals
def getNDIForHashtag(hashtag, sliceCount=20, maxFollowersCount=100, equalTime=True, equalRepliesCount=True):
    """Compute NDI variation for one hashtag.

    Returns two lists of ("#tag", (times, ndis)) tuples: the first computed
    over equal-duration time intervals, the second over intervals containing
    an equal number of tweets.  By default only tweets whose author has at
    most maxFollowersCount followers are considered, which keeps the reply
    network small enough to process quickly.
    """
    resultsEqualTimeIntervals = []
    resultsEqualRepliesCountIntervals = []
    print("Computing NDIs for hashtag %s..." % hashtag)
    ### Uncomment one of the following lines "tweets = ..."
    # Users with at most x followers, fast computation
    tweets = Tweet.objects.all().filter(user__followers_count__lte=maxFollowersCount).filter(hashtags__icontains=hashtag)
    # Complete dataset: Requires a lot of memory and a fast CPU for large networks
    # tweets = Tweet.objects.all().filter(hashtags__icontains=hashtag)
    timeSortedTweets = sorted(tweets, key=lambda x: x.created_at)
    if equalTime:
        ndis = getNDIValuesForHashtag(hashtag, sliceCount, timeSortedTweets)
        resultsEqualTimeIntervals += [("#" + hashtag, ndis)]
    if equalRepliesCount:
        ndis = getNDIValuesEqualRepliesCountForHashtag(hashtag, sliceCount, timeSortedTweets)
        resultsEqualRepliesCountIntervals += [("#" + hashtag, ndis)]
    return resultsEqualTimeIntervals, resultsEqualRepliesCountIntervals
def mostFrequentHashtags():
    """Return the 6 most frequent hashtags, lowercased, duplicates removed."""
    hashtagCount = 6  # n most frequent
    tagsWithCounts = sorted(getAllHashtags(),
                            key=lambda item: item[1],  # sort by occurrences
                            reverse=True)
    lowered = [tag.lower() for tag, unused in tagsWithCounts]
    return convertListToSetKeepingOrder(lowered)[:hashtagCount]
def convertListToSetKeepingOrder(xs):
    """Remove duplicates from xs while keeping first-occurrence order."""
    # dict keys are unique and preserve insertion order (Python 3.7+)
    return list(dict.fromkeys(xs))
def computeReplies(tweets):
    """Build reply dicts {from, to, polarity} from an iterable of tweets.

    A tweet replying to a specific status is mapped to a single reply; any
    other tweet (or one whose target cannot be resolved) is treated as a
    broadcast to all of the author's followers.
    """
    replies = []
    for tweet in tweets:
        authorId = int(tweet.user.id)
        polarity = float(tweet.polarity)
        replyTo = int(tweet.in_reply_to_status_id)
        # "is not 0" tested object identity and only worked thanks to
        # CPython's small-int cache; use a real equality test.
        if replyTo != 0:  # reply to single person
            try:
                # NOTE(review): this looks up a User by a *status* id
                # (in_reply_to_status_id); it only matches when the ids
                # coincide — confirm intent.
                user = User.objects.get(id=replyTo)
                userId = int(user.id)
                builtReply = {
                    "from": authorId,
                    "to": userId,
                    "polarity": polarity,
                }
                replies += [builtReply]
            except Exception:
                # target user unknown: reply to all his/her followers
                replies += getFollowerReplies(authorId, polarity)
        else:
            # reply to all his/her followers
            replies += getFollowerReplies(authorId, polarity)
    return replies
def getFollowerReplies(authorId, polarity):
    """Return one reply dict per follower of authorId, carrying polarity."""
    friendships = Friendship.objects.all().filter(followed_user__id=authorId)
    return [{"from": authorId, "to": int(f.user.id), "polarity": polarity}
            for f in friendships]
def getNDIValuesEqualRepliesCountForHashtag(hashtag, sliceCount, timeSortedTweets):
    """Compute NDI over slices holding (roughly) the same number of tweets.

    Returns (times, ndis); both lists are empty when fewer than two tweets
    are available.
    """
    ndiVariation = []
    if len(timeSortedTweets) >= 2:
        tweetCount = len(timeSortedTweets)
        # The previous "is not" float/int comparison was always true and
        # silently inflated the slice size by one; math.ceil alone already
        # rounds up.
        deltaTweets = int(math.ceil(tweetCount / float(sliceCount)))
        tweetOffset = 0
        sliceIndex = 1
        while tweetOffset < tweetCount:
            print("Computing slice %s/%s" %(sliceIndex, sliceCount))
            # NOTE: the +1 makes adjacent slices overlap by one tweet; kept as-is.
            upperOffset = tweetOffset + deltaTweets + 1
            tweetsSubset = timeSortedTweets[tweetOffset:upperOffset]
            replies = computeReplies(tweetsSubset)
            polarityCounter = PolarityCounter(replies)
            ndiVariation.append((int(tweetOffset), polarityCounter.NDI))
            tweetOffset += deltaTweets
            sliceIndex += 1
    # plot against consecutive slice indices rather than raw offsets
    times = list(range(len(ndiVariation)))
    ndis = [entry[1] for entry in ndiVariation]
    return times, ndis
def getNDIValuesForHashtag(hashtag, sliceCount, timeSortedTweets):
    """Compute NDI over sliceCount equal-duration intervals of a tweet list.

    timeSortedTweets must be sorted ascending by created_at.  Returns
    (times, ndis); both lists are empty when fewer than two tweets are
    available.
    """
    ndiVariation = []
    if len(timeSortedTweets) >= 2:
        first = timeSortedTweets[0].created_at
        last = timeSortedTweets[-1].created_at
        interval = last - first
        deltaTimeMillis = interval / sliceCount
        sliceIndex = 1
        timeUpperBound = first + deltaTimeMillis
        while timeUpperBound <= last:
            print("Computing slice %s/%s" %(sliceIndex, sliceCount))
            tweetsSubset = tweetsInInterval(timeSortedTweets, timeUpperBound - deltaTimeMillis, timeUpperBound)
            replies = computeReplies(tweetsSubset)
            polarityCounter = PolarityCounter(replies)
            ndiVariation += [(timeUpperBound, polarityCounter.NDI)]
            timeUpperBound += deltaTimeMillis
            # Guards against a zero-width interval (all tweets sharing one
            # timestamp): adding the delta would never advance the bound and
            # the loop would spin forever.  Written as a self-comparison so
            # it works whatever type (numeric or timedelta) the delta has.
            if timeUpperBound == timeUpperBound + deltaTimeMillis:
                break
            sliceIndex += 1
    # replace raw bounds with consecutive slice indices for plotting
    times = [x[0] for x in ndiVariation]
    times = [i for i, unused in enumerate(times)]
    ndis = [x[1] for x in ndiVariation]
    return times, ndis
def tweetsInInterval(tweets, timeLowerBound, timeUpperBound):
    """Return tweets whose created_at lies in [timeLowerBound, timeUpperBound]."""
    selected = []
    for tweet in tweets:
        if timeLowerBound <= tweet.created_at <= timeUpperBound:
            selected.append(tweet)
    return selected
'''
Created on 7 juin 2013
@author: Nils Amiet
'''
import threading
import time
import math
from twython import TwythonStreamer, Twython, TwythonRateLimitError
from twython.exceptions import TwythonError
from requests.exceptions import ConnectionError
from django.db import DatabaseError
from ITInfluence.models import Tweet, User
from ITInfluence.language import isEnglishTweet
from ITInfluence.hashtags import getSimilarHashtags
###
# Configuration starts here
###
'''
These values are provided for your personal use but since the code is distributed to multiple people,
and there can be only one machine using this account at the same time,
please use the values from your own Twitter account.
Register a regular Twitter account and then register an app at http://dev.twitter.com/apps
'''
# NOTE(review): real API credentials are committed to source control here.
# They should be revoked and loaded from environment variables or a secrets
# store instead of being hard-coded.
APP_KEY = "pjoZEliZOQNl9D4tLGljA"
APP_SECRET = "1Wp2Jd2eiKVWYH8nJFm3wg6C3bnf10k1D25uzajloU"
OAUTH_TOKEN = "1479001058-kJibhcFtcHvUKvbFnKgUO8zPlTpb2MC6HCr1Z1z"
OAUTH_TOKEN_SECRET = "KmhIChKm9nENIXt5P5xfotOgxlaI9JfDBy3eZ4ZVKDc"
'''
Enables or disables the stream monitor.
The stream monitor periodically checks whether the incoming tweet rate is too low.
In such a case it restarts the stream with additional similar hashtags.
'''
isStreamMonitorEnabled = True
'''
Number of seconds between each stream monitor check.
'''
monitorCheckInterval = 300 # seconds
'''
Minimum number of incoming tweets per minute required.
Used by the stream monitor to decide whether it should restart with similar hashtags.
'''
incomingTweetRateThreshold = 0.5 # minimum tweets per minute
'''
Number of seconds after which the stream is stopped if not a single tweet was received.
REMARK: You should not set streamTimeout to a value that is less than monitorCheckInterval because
it may cause having two or more streams opened at the same time, which is forbidden
by the Twitter API.
'''
streamTimeout = 180 # seconds
###
# Configuration ends here
###
streamingHashtag = ""
class TwitterREST():
    '''Handles operations that require calls to the Twitter REST API.

    Calls are wrapped with retry logic for rate limiting
    (TwythonRateLimitError) and transient errors.
    '''
    TIME_INTERVAL_IN_SECONDS = (15 * 60) + 2  # rate-limit window; add 2 seconds just to be sure
    MAX_FOLLOWERS_PER_REQUEST = 5000  # API page size for followers/ids
    MAX_RETRY_COUNT = 30
    RETRY_INTERVAL_IN_SECONDS = 30
    # Polled by sleepFor() so a long-running collection can be aborted.
    isCollectingFollowers = False

    def __init__(self):
        self.twitter = Twython(APP_KEY, APP_SECRET, OAUTH_TOKEN, OAUTH_TOKEN_SECRET)

    def stopCollecting(self):
        """Request that any in-progress follower collection stops."""
        self.isCollectingFollowers = False

    def sleepFor(self, seconds):
        """Sleep for the given number of seconds in 1s steps.

        Returns False as soon as the collection flag is cleared (abort
        requested), True once the full duration has elapsed.
        """
        startTime = time.time()
        for unused in range(seconds):
            time.sleep(1)
            if not self.isCollectingFollowers:
                return False
        # make sure we slept for long enough (time.sleep may undershoot)
        while True:
            currentTime = time.time()
            if currentTime - startTime < seconds:
                time.sleep(1)
            else:
                break
        return True

    def getFollowers(self, userId, getAll=False, retry=0):
        """Return the list of follower ids for userId.

        With getAll=True every cursor page is fetched, sleeping through rate
        limit windows as needed.  Returns [] for deleted/private users and
        None when aborted or when MAX_RETRY_COUNT is exhausted.
        """
        self.isCollectingFollowers = True
        try:
            totalFollowersCount = int(User.objects.get(id=userId).followers_count)
        except Exception:
            # user unknown locally / count missing: only used for progress display
            totalFollowersCount = 0
        requiredCallsCount = int(math.ceil(totalFollowersCount/float(self.MAX_FOLLOWERS_PER_REQUEST)))
        partCount = 1
        # define rate limited operation
        def doGetFollowers(**kwargs):
            try:
                return self.twitter.get_followers_ids(user_id=userId, **kwargs)
            except ConnectionError as e:
                print(e)
                print("ConnectionError! Retrying...")
                self.sleepFor(10)
                return doGetFollowers(**kwargs)
        try:
            self.displayFollowersCollectionStatus(userId, partCount, requiredCallsCount)
            returnValue = doGetFollowers()
        except TwythonRateLimitError:
            # wait a complete Twitter time interval
            print("Sleeping until limit is lifted...")
            if not self.sleepFor(self.TIME_INTERVAL_IN_SECONDS):
                return  # stop collecting
            # try again
            print("Retrying...")
            self.displayFollowersCollectionStatus(userId, partCount, requiredCallsCount)
            returnValue = doGetFollowers()
        except TwythonError as e:
            # User does not exist
            # User's account was probably deleted after we collected his tweets
            if "404" in str(e):
                print("WARNING: User %s does not exist!" % userId)
                return []
            if "401" in str(e):
                print("WARNING: User %s is probably private! Can't collect his/her followers." % userId)
                return []
            # Twitter is down / API changed / unknown error
            # wait a few seconds and retry
            print(e)
            print("Retrying in %s seconds..." % self.RETRY_INTERVAL_IN_SECONDS)
            if not self.sleepFor(self.RETRY_INTERVAL_IN_SECONDS):
                return
            if retry > self.MAX_RETRY_COUNT:
                print("Max retry count reached. Aborting...")
                return
            print("Retrying... attempt %s/%s" % (retry, self.MAX_RETRY_COUNT))
            return self.getFollowers(userId, getAll=getAll, retry=retry + 1)
        partCount += 1
        followers = returnValue["ids"]
        if getAll:
            # next_cursor == 0 marks the last page.  The old "is not 0" was
            # an identity test that only worked via CPython's small-int cache.
            while returnValue["next_cursor"] != 0:
                try:
                    self.displayFollowersCollectionStatus(userId, partCount, requiredCallsCount)
                    returnValue = doGetFollowers(cursor=returnValue["next_cursor"])
                except TwythonRateLimitError:
                    # wait a complete Twitter time interval
                    print("Sleeping until limit is lifted...")
                    if not self.sleepFor(self.TIME_INTERVAL_IN_SECONDS):
                        return  # stop collecting
                    # try again
                    print("Retrying...")
                    self.displayFollowersCollectionStatus(userId, partCount, requiredCallsCount)
                    returnValue = doGetFollowers(cursor=returnValue["next_cursor"])
                except TwythonError as e:
                    # Twitter is down / API changed / unknown error
                    # wait a few seconds and retry
                    print(e)
                    print("Retrying in %s seconds..." % self.RETRY_INTERVAL_IN_SECONDS)
                    if not self.sleepFor(self.RETRY_INTERVAL_IN_SECONDS):
                        return
                    if retry > self.MAX_RETRY_COUNT:
                        print("Max retry count reached. Aborting...")
                        return
                    print("Retrying... attempt %s/%s" % (retry, self.MAX_RETRY_COUNT))
                    return self.getFollowers(userId, getAll=getAll, retry=retry + 1)
                partCount += 1
                followers += returnValue["ids"]
        elif returnValue["next_cursor"] != 0:
            print("Warning: you specified getAll=False but there was more than %s followers. Results are truncated!" % self.MAX_FOLLOWERS_PER_REQUEST)
        return followers

    def displayFollowersCollectionStatus(self, userId, partCount, requiredCallsCount):
        """Print the follower-collection progress for userId."""
        print("Getting followers for user %s (part %s/%s)..." % (userId, partCount, requiredCallsCount))

    def getUser(self, userId):
        """Fetch user details from Twitter and return an (unsaved) User model."""
        # define rate limited operation
        def doGetUser():
            return self.twitter.show_user(user_id=userId)
        try:
            print("Getting details for user %s..." % userId)
            returnValue = doGetUser()
        except TwythonRateLimitError:
            # wait a complete Twitter time interval
            print("Sleeping until limit is lifted...")
            self.sleepFor(self.TIME_INTERVAL_IN_SECONDS)
            # try again
            print("Retrying...")
            print("Getting details for user %s..." % userId)
            returnValue = doGetUser()
        print("done.")
        screen_name = returnValue["screen_name"]
        statuses_count = returnValue["statuses_count"]
        friends_count = returnValue["friends_count"]
        followers_count = returnValue["followers_count"]
        lang = returnValue["lang"]
        followersReady = True  # don't collect followers of followers
        user = User(id=userId, screen_name=screen_name, statuses_count=statuses_count, friends_count=friends_count, followers_count=followers_count, lang=lang, followers_ready=followersReady)
        return user
class TwitterStreaming(TwythonStreamer):
    '''Handles operations that deal with the Twitter Streaming API.'''

    def setParams(self, stopTweetCount, stopTime):
        """Configure stop conditions (0 disables each) and reset counters."""
        self.stopTweetCount = stopTweetCount  # stop after this many tweets (0 = unlimited)
        self.stopTime = stopTime  # stop after this many minutes (0 = unlimited)
        self.receivedTweetsCount = 0
        self.startTime = time.time()
        self.rateTweetCount = 0
        self.rateLastTime = time.time()

    def getRunningTime(self):
        """Return minutes elapsed since setParams() was called."""
        currentTime = time.time()
        elapsedTime = currentTime - self.startTime  # seconds
        elapsedTime /= 60.0  # convert to minutes
        return elapsedTime

    def getRate(self):
        '''Received tweets per minute since the previous getRate() call.'''
        receivedTweets = self.rateTweetCount
        timeInterval = (time.time() - self.rateLastTime) / 60.0  # minutes
        rate = receivedTweets / timeInterval
        self.rateLastTime = time.time()
        self.rateTweetCount = 0
        return rate

    def on_success(self, data):
        """Handle one incoming status: drop non-English tweets, store the
        author and the tweet, then disconnect once a stop condition is met."""
        try:
            text = data["text"]
            if not isEnglishTweet(text):
                print("Dropped tweet (considered non-english): " + text)
                return
            else:
                print("ENGLISH: " + text)
        except Exception:
            # status without text (e.g. delete notices): fall through
            pass
        try:
            # User
            author = data["user"]
            userId = author["id"]
            screen_name = author["screen_name"]
            statuses_count = author["statuses_count"]
            friends_count = author["friends_count"]
            followers_count = author["followers_count"]
            lang = author["lang"]
            followersReady = False
            userReady = True
            user = User(id=userId, screen_name=screen_name, statuses_count=statuses_count, friends_count=friends_count, followers_count=followers_count, lang=lang, followers_ready=followersReady, user_ready=userReady)
            if not User.objects.filter(id=userId).exists():
                try:
                    user.save()
                except Exception:
                    print("ERROR: failed saving user")
            # Tweet
            tweetId = data["id_str"]
            in_reply_to_status_id = data["in_reply_to_status_id"]
            if in_reply_to_status_id is None:
                in_reply_to_status_id = 0
            hashtags = ""
            try:
                ht = data["entities"]["hashtags"]
                tags = [x["text"] for x in ht]
                hashtags = " ".join(tags)
            except (KeyError, TypeError):
                # this tweet doesn't contain any hashtag
                pass
            text = data["text"]
            created_at = data["created_at"]
            timestamp = time.strftime('%Y-%m-%d %H:%M:%S+00:00', time.strptime(created_at, '%a %b %d %H:%M:%S +0000 %Y'))
            polarity = 0
            polarityReady = False
            tweet = Tweet(id=tweetId, user=user, in_reply_to_status_id=in_reply_to_status_id, text=text, created_at=timestamp, polarity=polarity, polarity_ready=polarityReady, hashtags=hashtags)
            tweet.save()
            # update received tweets count
            self.receivedTweetsCount += 1
            self.rateTweetCount += 1
            # Check stop conditions.  The first clause used to test
            # receivedTweetsCount against 0 (with an identity "is not 0"),
            # which disconnected after the first tweet whenever
            # stopTweetCount was 0 ("unlimited"); mirror the stopTime clause.
            if (self.stopTweetCount != 0 and self.receivedTweetsCount >= self.stopTweetCount) or (self.stopTime != 0 and self.getRunningTime() >= self.stopTime):
                self.disconnect()
        except KeyError:
            # Some tweets are badly sent and are missing user or text
            pass
        except DatabaseError:
            # Bad response (invalid JSON)
            pass
        except Exception as e:
            print(e)

    def on_error(self, status_code, data):
        """Log streaming errors reported by Twitter."""
        print(status_code, data)
'''FUNCTIONS'''
def readStream(filterHashtag, stopTweetCount, stopTime, stream):
    """Run the streaming filter; on failure, restart with similar hashtags."""
    stream.setParams(stopTweetCount, stopTime)
    try:
        stream.statuses.filter(track=filterHashtag)
        print("Stopped streaming")
    except Exception as err:
        print(err)
        stream.disconnect()
        print("Stopped streaming (timeout)")
        # retry with similar hashtags
        updateHashtagsUsingSimilar(stream)
def launchStreamMonitor():
    """Start the daemon thread that watches the incoming tweet rate."""
    global stream
    monitorThread = threading.Thread(target=monitorStream, args=(stream,))
    monitorThread.daemon = True
    print("Launched stream monitor...")
    monitorThread.start()
def monitorStream(stream):
    """Periodically check the incoming tweet rate; when it drops below the
    threshold, restart the stream with additional similar hashtags."""
    while True:
        try:
            if not stream.connected:
                print("Stream is disconnected.")
            else:
                print("Stream is connected.")
                tweetsPerMinute = stream.getRate()
                print("Current rate: %s tweets/min" % tweetsPerMinute)
                uptime = time.time() - stream.startTime
                # only react once the stream had a fair chance to produce tweets
                if tweetsPerMinute < incomingTweetRateThreshold and uptime >= streamTimeout:
                    print("Not receiving enough tweets matching this set of hashtags.")
                    print("Retrying with additional similar hashtags...")
                    updateHashtagsUsingSimilar(stream)
        except Exception as err:
            print(err)
            print("WARNING: stream not instantiated. Stopping monitor...")
            break
        # sleep until next check
        time.sleep(monitorCheckInterval)
def updateHashtagsUsingSimilar(stream):
    """Restart the stream with the current filter widened by similar hashtags."""
    currentTags = [tag.replace("#", "") for tag in streamingHashtag.split(",")]
    similarCount = 2  # N most similar tags per current tag
    similarTags = []
    for tag in currentTags:
        ranked = sorted(getSimilarHashtags(tag), key=lambda item: item[1], reverse=True)
        similarTags += [name for name, unused in ranked[:similarCount]]
    allTags = set(similarTags + currentTags)  # remove duplicates
    # comma-separated string with a leading # on each tag
    newFilterTags = ",".join(["#" + tag for tag in allTags])
    print("Current tags: %s" % streamingHashtag)
    print("New tags: %s" % newFilterTags)
    # stop stream
    stream.disconnect()
    # carry over the remaining quota, then restart with the widened filter
    stopTweetCount = stream.stopTweetCount - stream.receivedTweetsCount
    stopTime = stream.stopTime
    del stream
    startStreaming(newFilterTags, stopTweetCount, stopTime)
def startStreaming(filterHashtag="twitter", stopTweetCount=0, stopTime=0):
    """Begin collecting tweets matching filterHashtag in a daemon thread."""
    global streamingHashtag
    global stream
    streamingHashtag = filterHashtag
    readerThread = threading.Thread(target=readStream,
                                    args=(filterHashtag, stopTweetCount, stopTime, stream))
    readerThread.daemon = True
    print("started reading stream...")
    readerThread.start()
def toggleStreaming():
    """Start the stream when disconnected; otherwise disconnect it."""
    global stream
    if stream.connected:
        print("disconnecting stream...")
        stream.disconnect()
    else:
        startStreaming()
# instantiate stream
# Module import side effect: one shared stream object for the whole process.
stream = TwitterStreaming(APP_KEY, APP_SECRET, OAUTH_TOKEN, OAUTH_TOKEN_SECRET, timeout=streamTimeout)
# monitor stream
if isStreamMonitorEnabled:
    launchStreamMonitor()
| Python |
# Create your views here.
import math
import networkx as nx
from django.shortcuts import render_to_response, redirect
from django.template import RequestContext
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.db import connection
from ITInfluence.models import Tweet, User
from ITInfluence.models import InfrarougeUser, InfrarougeForum, InfrarougeForumThread, InfrarougeThreadMessage
from ITInfluence import twitter, followers, NDI
from ITInfluence.plot import GraphPlotter, TwoDimensionalValuesPlotter
from ITInfluence.hashtags import getAllHashtags, getSimilarHashtags
from ITInfluence.infrarouge import InfrarougeGraphBuilder
from ITInfluence.TwitterGraphBuilder import TwitterGraphBuilder, TwitterFollowersGraphBuilder
from Tools.Timer import timer
from SentiStrength import sentistrength
from InfrarougeTwitterInfluence.settings import DATABASES
'''VIEWS'''
def index(request):
    """Redirect the site root to the Infrarouge statistics page."""
    return redirect("/infrarouge/")
def twitterStats(request):
    """Render the Twitter statistics page (entity counts + figure URLs).

    Row counts for the user and friendship tables are read from
    information_schema instead of COUNT(*) because the tables are large;
    note these are estimates (MySQL's table_rows is approximate).
    """
    NDIPath = "/twitter/images/ndi/"
    NDITimePath = "/twitter/images/ndi-time/"
    NDIRepliesCountPath = "/twitter/images/ndi-replies-count/"
    repliesGraphVisualizationPath = "/twitter/images/replies-graph/"
    followersGraphVisualizationPath = "/twitter/images/followers-graph/"
    tweetCount = Tweet.objects.count()
    ### Use one of the two methods (slow but exact count or fast estimation)
    ## METHOD 1
    # Uncomment for exact count (WARNING: slow for large tables)
    # userCount = User.objects.count()
    # friendshipCount = Friendship.objects.count()
    ## METHOD 2
    # Uncomment for fast count using approximation
    databaseName = DATABASES["default"]["NAME"]
    userTableName = "ITInfluence_user"
    friendshipTableName = "ITInfluence_friendship"
    # parameterized query: the schema name is passed as a bind parameter
    query = """
        SELECT table_name, table_rows
        FROM information_schema.tables
        WHERE table_schema = %s
        """
    cursor = connection.cursor()
    cursor.execute(query, [databaseName])
    response = cursor.fetchall()
    userCount = 0
    friendshipCount = 0
    for row in response:
        if row[0].lower() == userTableName.lower():
            userCount = row[1]
        if row[0].lower() == friendshipTableName.lower():
            friendshipCount = row[1]
    cursor.close()
    ### END
    context = {
        "tweetCount": tweetCount,
        "userCount": userCount,
        "friendshipCount": friendshipCount,
        'NDIPath': NDIPath,
        'NDITimePath': NDITimePath,
        'NDIRepliesCountPath': NDIRepliesCountPath,
        "repliesGraphVisualizationPath": repliesGraphVisualizationPath,
        "followersGraphVisualizationPath": followersGraphVisualizationPath,
    }
    return render_to_response("ITInfluence/twitter.html", context, context_instance=RequestContext(request))
def twitterBrowseHashtags(request):
    """Render the hashtag cloud for tags seen at least minimumOccurences times."""
    minimumOccurences = 10
    tagCloud = [(tag, count, fontSize(count))
                for (tag, count) in getAllHashtags()
                if count >= minimumOccurences]
    context = {
        "hashtags": tagCloud,
        "minimumOccurences": minimumOccurences,
    }
    return render_to_response("ITInfluence/twitter-browse-hashtags.html", context, context_instance=RequestContext(request))
def twitterHashtag(request, hashtag=""):
    """Render one hashtag's page along with its similar hashtags."""
    similar = [(tag, count, fontSize(count))
               for (tag, count) in getSimilarHashtags(hashtag)]
    context = {
        "hashtag": hashtag,
        "similarHashtags": similar,
    }
    return render_to_response("ITInfluence/hashtag.html", context, context_instance=RequestContext(request))
def twitterBrowseTweets(request):
    """Render a paginated list of tweets, newest first.

    The "tpp" GET parameter controls tweets per page (default 20, also used
    for missing/invalid/non-positive values); "page" selects the page.
    Tweets are (re)classified with SentiStrength first.
    """
    classifyTweets()  # using sentistrength
    # The old code raised and swallowed a generic Exception to reject
    # non-positive values and used a bare except; parse explicitly instead.
    try:
        tweetsPerPage = int(request.GET.get("tpp"))
    except (TypeError, ValueError):
        # parameter missing (None) or not a number
        tweetsPerPage = 20
    else:
        if tweetsPerPage <= 0:
            tweetsPerPage = 20
    tweets = Tweet.objects.all().order_by("-created_at")
    paginator = Paginator(tweets, tweetsPerPage)
    tweetCount = Tweet.objects.count()
    page = request.GET.get("page")
    try:
        tweets = paginator.page(page)
    except PageNotAnInteger:
        tweets = paginator.page(1)
    except EmptyPage:
        tweets = paginator.page(paginator.num_pages)
    context = {
        "tweets": tweets,
        "tweetCount": tweetCount,
        "tweetsPerPage": tweetsPerPage,
    }
    return render_to_response("ITInfluence/twitter-browse-tweets.html", context, context_instance=RequestContext(request))
def twitterFollowersCountRanking(request):
    """Render the top users ordered by follower count."""
    rankingLength = 30  # top n users
    topUsers = User.objects.all().filter(user_ready=1).order_by("-followers_count")[:rankingLength]
    context = {
        "users": topUsers,
    }
    return render_to_response("ITInfluence/twitter-followers-ranking.html", context, context_instance=RequestContext(request))
def twitterStartCollectingStream(request, hashtag):
    """Render the stream-collection page.

    NOTE(review): the hashtag argument is unused and the context is empty;
    presumably the template/URLconf carries the information — confirm.
    """
    context = {
    }
    return render_to_response("ITInfluence/twitter-collect-stream.html", context, context_instance=RequestContext(request))
def twitterShowCollectForm(request):
    """Render the collect form; on POST, start streaming the given hashtags.

    Also reports the progress of the background follower-collection job.
    """
    totalTimeToCollectFolowers = followers.getTotalTimeToCollectFollowers()
    remainingTimeToCollectFolowers = followers.getRemainingTimeToCollectFollowers()
    twoDecimalFormat = "{0:.2f}"
    followersTimeDays = twoDecimalFormat.format(totalTimeToCollectFolowers[0])
    followersTimeHours = twoDecimalFormat.format(totalTimeToCollectFolowers[1])
    followersTimeMinutes = twoDecimalFormat.format(totalTimeToCollectFolowers[2])
    followersRemainingTimeDays = twoDecimalFormat.format(remainingTimeToCollectFolowers[0])
    followersRemainingTimeHours = twoDecimalFormat.format(remainingTimeToCollectFolowers[1])
    followersRemainingTimeMinutes = twoDecimalFormat.format(remainingTimeToCollectFolowers[2])
    try:
        # NOTE(review): progress is derived from the minutes component only
        # (index 2) — presumably the tuple holds the duration expressed in
        # days/hours/minutes; confirm against followers.py.
        followersProgressPercentage = 100 - 100 * (remainingTimeToCollectFolowers[2] / totalTimeToCollectFolowers[2])
    except (ZeroDivisionError, TypeError):
        followersProgressPercentage = 100
    followersProgressPercentage = twoDecimalFormat.format(followersProgressPercentage)
    context = {
        "isCollectingFollowers": followers.rest.isCollectingFollowers,
        "followersTimeDays": followersTimeDays,
        "followersTimeHours": followersTimeHours,
        "followersTimeMinutes": followersTimeMinutes,
        "followersRemainingTimeDays": followersRemainingTimeDays,
        "followersRemainingTimeHours": followersRemainingTimeHours,
        "followersRemainingTimeMinutes": followersRemainingTimeMinutes,
        "followersProgressPercentage": followersProgressPercentage,
    }
    try:
        hashtagSeparator = ","
        # add a leading # to all tags separated by commas
        hashtags = ["#" + tag.strip() for tag in request.POST["hashtag"].split(hashtagSeparator)]
        filterTag = hashtagSeparator.join(hashtags)
        stopTweetCount = int(request.POST["stopTweetCount"])
        stopTime = int(request.POST["stopTime"])
        # start collecting tweets
        twitter.startStreaming(filterHashtag=filterTag, stopTweetCount=stopTweetCount, stopTime=stopTime)
    except (KeyError, ValueError, TypeError):
        # plain GET (no form fields) or malformed numbers: just show the form
        pass
    context["isStreaming"] = twitter.stream.connected
    context["streamingHashtag"] = twitter.streamingHashtag
    return render_to_response("ITInfluence/twitter-collect-form.html", context, context_instance=RequestContext(request))
def twitterToggleCollectingFollowers(request):
    """Start or stop the follower-collection job, then return to the form."""
    followers.toggleFollowersCollection()
    return redirect("/twitter/collect/")
def infrarougeStats(request):
    """Render the Infrarouge statistics page (entity counts + figure URLs)."""
    context = {
        'forumCount': InfrarougeForum.objects.count(),
        'forumThreadCount': InfrarougeForumThread.objects.count(),
        'threadMessageCount': InfrarougeThreadMessage.objects.count(),
        'userCount': InfrarougeUser.objects.count(),
        'NDIPath': "/infrarouge/images/ndi/",
        'NDITimePath': "/infrarouge/images/ndi-time/",
        'NDIRepliesCountPath': "/infrarouge/images/ndi-replies-count/",
        'repliesGraphVisualizationPath': "/infrarouge/images/replies-graph/",
        'userDiscussionsGraphVisualizationPath': "/infrarouge/images/user-discussion-graph/",
    }
    return render_to_response("ITInfluence/infrarouge.html", context, context_instance=RequestContext(request))
def getInfrarougeForums(request):
    """Render the list of all Infrarouge forums."""
    context = {"forums": InfrarougeForum.objects.all()}
    return render_to_response("ITInfluence/infrarouge-forums.html", context, context_instance=RequestContext(request))
def twitterStopStreaming(request):
    """Toggle the tweet stream (despite the name this toggles, not only
    stops), then return to the collect form."""
    twitter.toggleStreaming()
    return redirect("/twitter/collect/")
'''Image views'''
def showImage(request, path):
    """Wrapper view that displays the image at the given path on an HTML page."""
    context = {"path": path}
    return render_to_response("ITInfluence/show-image.html", context, context_instance=RequestContext(request))
# These views do NOT return HTML. They return PNG images.
def getInfrarougeNDI(request):
    """PNG: NDI curves for every forum, both interval schemes combined."""
    with timer():
        equalTime, equalReplies = NDI.polarizationForAllForums()
        response = TwoDimensionalValuesPlotter().memoryPlotMultipleDatasetsMultidimensional(
            [equalTime, equalReplies], "Interval", "NDI")
    return response
def getInfrarougeNDITimeFigure(request):
    """PNG: per-forum NDI over equal-duration intervals."""
    with timer():
        equalTime, unused = NDI.polarizationForAllForums(equalRepliesCount=False)
        response = TwoDimensionalValuesPlotter().memoryPlotMultipleDatasets(
            equalTime, "Interval (equal time)", "NDI")
    return response
def getInfrarougeNDIReplyCountFigure(request):
    """PNG: per-forum NDI over intervals holding an equal number of replies."""
    with timer():
        unused, equalReplies = NDI.polarizationForAllForums(equalTime=False)
        response = TwoDimensionalValuesPlotter().memoryPlotMultipleDatasets(
            equalReplies, "Interval (equal reply count)", "NDI")
    return response
def getInfrarougeRepliesGraph(request):
    """Return a PNG of the Infrarouge user-to-user replies graph."""
    with timer():
        builder = InfrarougeGraphBuilder(NDI.infrarougeDatabasePath)
        response = GraphPlotter().memoryPlot(
            builder.graph1, nodeLabel="User", edgeLabel="Reply")
        return response
def getInfrarougeUserDiscussionGraph(request):
    """Return a PNG of the bipartite user/discussion participation graph."""
    with timer():
        builder = InfrarougeGraphBuilder(NDI.infrarougeDatabasePath)
        plotter = GraphPlotter()
        graph = builder.graph2
        # Bipartite layout: users on one side, discussions on the other.
        positions = plotter.bipartiteNodePositions(graph)
        response = plotter.memoryPlot(
            graph,
            bipartite=True,
            pos=positions,
            nodeLabel="User",
            nodeLabel2="Discussion",
            edgeLabel="Participation")
        return response
def getTwitterNDI(request):
    """Return a PNG plotting the NDI of the most frequent hashtags (both slicings)."""
    with timer():  # measure execution time
        byTime, byReplies = NDI.getNDIForMostFrequentHashtags()
        plotter = TwoDimensionalValuesPlotter()
        response = plotter.memoryPlotMultipleDatasetsMultidimensional(
            [byTime, byReplies], "Interval", "NDI")
        return response
def getTwitterNDISingleHashtag(request, hashtag):
    """Return a PNG plotting the NDI for one hashtag, for both slicing modes."""
    with timer():
        byTime, byReplies = NDI.getNDIForHashtag(hashtag)
        plotter = TwoDimensionalValuesPlotter(width=800, height=600)
        response = plotter.memoryPlotMultipleDatasetsMultidimensional(
            [byTime, byReplies], "Interval", "NDI")
        return response
def getTwitterNDITimeFigure(request):
    """Return a PNG of the hashtag NDI computed over equal-time slices."""
    with timer():
        equalTimeResults, _ = NDI.getNDIForMostFrequentHashtags(equalRepliesCount=False)
        plotter = TwoDimensionalValuesPlotter()
        response = plotter.memoryPlotMultipleDatasets(
            equalTimeResults, "Interval (equal time)", "NDI")
        return response
def getTwitterNDIReplyCountFigure(request):
    """Return a PNG of the hashtag NDI computed over equal-reply-count slices.

    Only the equal-reply-count dataset is computed (equalTime=False).
    """
    with timer():
        unused, resultsEqualRepliesCount = NDI.getNDIForMostFrequentHashtags(equalTime=False)
        plotter = TwoDimensionalValuesPlotter()
        # Fixed copy-paste bug: this view plots the equal-reply-count slicing,
        # but the axis label said "equal time" (copied from the view above;
        # cf. the matching Infrarouge view, which labels it correctly).
        xlabel = "Interval (equal reply count)"
        ylabel = "NDI"
        response = plotter.memoryPlotMultipleDatasets(resultsEqualRepliesCount, xlabel, ylabel)
        return response
def getTwitterRepliesGraph(request):
    """Return a PNG of the Twitter reply graph."""
    with timer():
        graph = TwitterGraphBuilder().graph
        # Uncomment to also dump the graph to GML:
        # saveGraph(graph, "replies_graph.gml")
        print("Plotting graph...")
        response = GraphPlotter().memoryPlot(graph)
        print("...done!")
        return response
def getTwitterFollowersGraph(request):
    """Return a PNG of the Twitter followers ("friendship") graph.

    Node size and colour both encode the builder's per-node size metric.
    """
    with timer():
        builder = TwitterFollowersGraphBuilder()
        graph = builder.graph
        # Uncomment to also dump the graph to GML:
        # saveGraph(graph, "followers_graph.gml")
        print("Plotting graph...")
        response = GraphPlotter().memoryPlot(
            graph,
            nodeSizes=builder.nodeSizes,
            nodeColor=builder.nodeSizes,
            nodeLabel="User",
            edgeLabel="Friendship")
        print("...done!")
        return response
'''HELPER FUNCTIONS'''
def saveGraph(graph, filename):
    """Serialize *graph* to *filename* in GML format."""
    nx.gml.write_gml(graph, filename)
def fontSize(occurences):
    """Map an occurrence count to a font size: 1 plus the natural log of the count."""
    return math.log(occurences) + 1
def classifyTweets():
    """Annotate all still-unclassified tweets with a sentiment polarity.

    All pending tweet texts are sent to SentiStrength in one batch (one tweet
    per line), then each tweet row is updated with its polarity and marked done.
    """
    # Materialize the queryset once: it is iterated twice below (text batch,
    # then zip with the results) and a lazy, unordered queryset re-runs the
    # query on each iteration with no guarantee of matching row order --
    # which could pair polarities with the wrong tweets.
    tweets = list(Tweet.objects.all().filter(polarity_ready=False))
    if len(tweets) > 0:
        s = sentistrength.SentiStrength()
        # One tweet per line; newlines inside a tweet would desync the batch.
        text = ""
        for tweet in tweets:
            text += tweet.text.replace("\n", " ") + "\n"
        # NOTE(review): "classifiy" looks misspelled but is presumably the
        # actual method name on the SentiStrength wrapper -- confirm there.
        polarities = s.classifiy(text)
        # update tweets with their polarity
        for tweet, polarity in zip(tweets, polarities):
            tweet.polarity = polarity
            tweet.polarity_ready = True
            tweet.save()
'''
Created on 12 juin 2013
@author: Nils Amiet
'''
# Database routers are used to know which model should be used with which database.
# This is useful in our case because we have multiple databases.
class TwitterRouter():
    """Route Twitter models to the "default" database and all others to "infrarouge".

    A model counts as a Twitter model when its Meta options carry an
    ``isTwitterModel`` attribute.
    """

    def _databaseFor(self, model):
        # Shared routing rule used for both reads and writes.
        if hasattr(model._meta, "isTwitterModel"):
            return "default"
        return "infrarouge"

    def db_for_read(self, model, **hints):
        """Database used for read queries on *model*."""
        return self._databaseFor(model)

    def db_for_write(self, model, **hints):
        """Database used for write queries on *model*."""
        return self._databaseFor(model)

    def allow_syncdb(self, db, model):
        """Allow syncing Twitter models only to "default", the rest only elsewhere."""
        if hasattr(model._meta, "isTwitterModel"):
            return db == "default"
        return db != "default"
'''
Created on 23 mars 2013
@author: Nils Amiet
'''
# coding: utf-8
'''
Graph 1:
Replies graph
Weighted directed graph
Weight of edge from A to B = number of replies from user A to user B
Graph 2:
User-Discussion graph
Bipartite graph
Edge from User A to discussion D => user A participates in discussion D
'''
import networkx as nx
import networkx.readwrite.gml as gml
import sqlite3
import random
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
'''Config'''
# Path to the SQLite database produced by InfrarougeGrabber (relative to this file).
databasePath = "../../../../../InfrarougeGrabber/infrarouge.db"
# Output paths for the two generated GML graph files.
graph1Path = "graph1.gml"
graph2Path = "graph2.gml"

# (fromUser, toUser) -> number of replies; filled by countUserReplies(), feeds graph1.
userRepliesCounts = {}
# Graph 1: weighted directed replies graph.
graph1 = nx.DiGraph()

# (user, forumThread) tuples; filled by buildUserParticipations(), feeds graph2.
userParticipations = []
# Graph 2: bipartite user/discussion graph.
graph2 = nx.Graph()
def users():
    """Return every row of the ``user`` table as a list of tuples."""
    with sqlite3.connect(databasePath) as connection:
        cursor = connection.cursor()
        cursor.execute("SELECT * FROM user")
        rows = list(cursor)
        cursor.close()
    return rows
def getForums():
    """Return all forums as dicts with keys ``id``, ``title`` and ``description``.

    Fix: the cursor is now closed explicitly, matching the other query helpers
    in this module (it was previously left open until garbage collection).
    """
    forums = []
    with sqlite3.connect(databasePath) as connection:
        cursor = connection.cursor()
        cursor.execute("SELECT * FROM forum")
        for forumRow in cursor:
            forums.append({
                "id": forumRow[0],
                "title": forumRow[1],
                "description": forumRow[2],
            })
        cursor.close()
    return forums
def polarityReplies(forum=None):
    '''Returns a list of replies containing the source, destination, polarity, timestamp and forum id

    @param forum: optional forum id; when given, only that forum's replies are returned
    '''
    replies = []
    with sqlite3.connect(databasePath) as connection:
        cursor = connection.cursor()
        query = """
        SELECT r.fkfrom, r.fkto, m.polarity, m.timestamp, f.id
        FROM reply as r
        INNER JOIN threadmessage as m
        ON r.fkthreadmessage=m.id
        INNER JOIN forumthread as t
        ON m.fkforumthread=t.id
        INNER JOIN forum as f
        ON t.fkparentforum=f.id
        """
        params = ()
        if forum is not None:
            # Fix: bind the forum id as a query parameter instead of
            # concatenating it into the SQL string (injection/quoting safety).
            query += "WHERE f.id=?"
            params = (forum,)
        cursor.execute(query, params)
        for reply in cursor:
            replies.append({
                "from": reply[0],
                "to": reply[1],
                "polarity": reply[2],
                "timestamp": reply[3],
                "forum": reply[4],
            })
        cursor.close()
    return replies
def countUserReplies():
    """Feed every row of the ``reply`` table into the global reply counters."""
    with sqlite3.connect(databasePath) as connection:
        cursor = connection.cursor()
        cursor.execute("SELECT * FROM reply")
        for row in cursor:
            countReply(row)
        cursor.close()
def countReply(reply):
    """Increment the (sender, recipient) reply counter for one reply row."""
    global userRepliesCounts
    sender = reply[0]
    recipient = reply[1]
    # discussion = reply[2]
    key = (sender, recipient)
    userRepliesCounts[key] = userRepliesCounts.get(key, 0) + 1
def createRepliesGraph():
    """Build graph1 (directed, weighted) from the accumulated reply counts."""
    global graph1
    # Edge weight = number of replies from fromTo[0] to fromTo[1].
    for fromTo, w in userRepliesCounts.items():
        graph1.add_edge(fromTo[0], fromTo[1], weight=w)
def buildUserParticipations():
    """Collect one (author, thread) tuple per thread message into the global list."""
    global userParticipations
    with sqlite3.connect(databasePath) as connection:
        cursor = connection.cursor()
        cursor.execute("SELECT * FROM threadmessage")
        for message in cursor:
            # Column 2 is the forum thread id, column 3 the author id.
            userParticipations.append((message[3], message[2]))
        cursor.close()
def createParticipationGraph():
    """Build graph2, the bipartite user/discussion participation graph."""
    global graph2
    users = [x[0] for x in userParticipations]
    discussions = [x[1] for x in userParticipations]
    # bipartite=0/1 marks the two node sets (networkx bipartite convention).
    graph2.add_nodes_from(users, bipartite=0)
    graph2.add_nodes_from(discussions, bipartite=1)
    graph2.add_edges_from(userParticipations)
def saveGraphs():
    """Write both global graphs to their configured GML output paths."""
    gml.write_gml(graph1, graph1Path)
    gml.write_gml(graph2, graph2Path)
def drawGraph(graph, filename, dpi):
    """Render *graph* with a spring layout and save it as an image file.

    @param filename: output path; the extension selects the format (png here)
    @param dpi: output resolution passed to matplotlib
    """
    pos = nx.spring_layout(graph)
    # Alternative layouts that were tried:
    # pos = nx.shell_layout(graph)
    # pos = nx.graphviz_layout(graph)
    nx.draw(graph, pos=pos)
    plt.savefig(filename, dpi=dpi)  # save as png
if __name__ == "__main__":
    # Build both graphs from the database, persist them as GML,
    # then print a few sanity checks and render graph 1.
    countUserReplies()
    createRepliesGraph()
    buildUserParticipations()
    createParticipationGraph()
    saveGraphs()
    print(len(graph1.nodes()))
    print(len(graph2.nodes()))
    print(polarityReplies(forum=1))
    print(getForums())
    drawGraph(graph1, "g1.png", 180)
'''
Created on 23 mai 2013
@author: Nils Amiet
'''
# coding: utf-8
class PolarityCounter:
    """Per-user polarity statistics and the Network Disagreement Index (NDI).

    For each user, counts positive/negative messages sent and received,
    derives an average polarity difference, then computes the NDI over the
    deduplicated (undirected) set of reply edges.
    """

    # Keys used inside the per-user statistics dictionaries.
    RECEIVED = "recv"
    SENT = "sent"
    AVERAGE = "avg"
    NDI = -1  # network disagreement index (default; set per instance by computeNDI)

    def __init__(self, replies):
        '''
        Replies: a list of replies with the attributes "from", "to" and "polarity"
        '''
        self.replies = replies
        self.edges = []
        # Fix: this used to be a class attribute, so every instance shared
        # (and kept accumulating into) the same dictionary.
        self.polarityCounts = {}
        self.countPolarities()
        self.computeAveragePolarities()
        self.computeNDI()

    def _userCounts(self, user):
        """Return the stats dict for *user*, creating an empty one on first use."""
        if user not in self.polarityCounts:
            self.polarityCounts[user] = {self.RECEIVED: {}, self.SENT: {}, self.AVERAGE: None}
        return self.polarityCounts[user]

    def countPolarities(self):
        """Tally sent/received message polarities, clamped to 0 (<=0) or 1 (>0)."""
        for reply in self.replies:
            fromUser = reply["from"]
            toUser = reply["to"]
            polarity = 0 if reply["polarity"] <= 0 else 1
            sentCounts = self._userCounts(fromUser)[self.SENT]
            recvCounts = self._userCounts(toUser)[self.RECEIVED]
            sentCounts[polarity] = sentCounts.get(polarity, 0) + 1
            recvCounts[polarity] = recvCounts.get(polarity, 0) + 1

    def computeAveragePolarities(self):
        """Set each user's AVERAGE to |avg received polarity - avg sent polarity|.

        Users who never sent or never received a message keep AVERAGE = None.
        (Fix: the old code left `recv`/`sent` holding the values of the
        PREVIOUS user in that case, producing a wrong AVERAGE.)
        """
        for user, userCounts in self.polarityCounts.items():
            receivedPositive = userCounts[self.RECEIVED].get(1, 0)
            receivedNegative = userCounts[self.RECEIVED].get(0, 0)
            sentPositive = userCounts[self.SENT].get(1, 0)
            sentNegative = userCounts[self.SENT].get(0, 0)
            recv = None
            sent = None
            receivedTotal = receivedPositive + receivedNegative
            if receivedTotal > 0:
                recv = (receivedPositive - receivedNegative) / float(receivedTotal)
            sentTotal = sentPositive + sentNegative
            if sentTotal > 0:
                sent = (sentPositive - sentNegative) / float(sentTotal)
            if recv is not None and sent is not None:
                userCounts[self.AVERAGE] = abs(recv - sent)

    def _opinion(self, user):
        """A user's opinion in [0, 1]: half their AVERAGE, or 0 when unknown."""
        average = self.polarityCounts.get(user, {}).get(self.AVERAGE)
        if average is None:
            return 0
        return average / 2.0

    def computeNDI(self):
        """Compute the NDI: sum over edges of the squared opinion difference."""
        self.computeEdges()
        sumNDI = 0
        for edge in self.edges:
            weight = 1  # all edges currently weigh the same
            increment = weight * ((self._opinion(edge["from"]) - self._opinion(edge["to"])) ** 2)
            sumNDI += increment
        self.NDI = sumNDI

    def computeEdges(self):
        """Deduplicate replies into undirected edges (one per user pair)."""
        for reply in self.replies:
            if not self.contains(reply, self.edges):
                self.edges += [reply]

    def contains(self, reply, edges):
        """True if *edges* already holds an edge between the same user pair."""
        for edge in edges:
            if self.isSameMessage(reply, edge):
                return True
        return False

    def isSameMessage(self, reply, reply2):
        """True if both replies connect the same two users, in either direction."""
        return (reply["from"] == reply2["from"] and reply["to"] == reply2["to"]) or \
               (reply["from"] == reply2["to"] and reply["to"] == reply2["from"])

    def polarityDifferenceRanking(self):
        """Return [(user, AVERAGE)] pairs sorted by decreasing polarity difference."""
        users = {user: userCounts[self.AVERAGE]
                 for user, userCounts in self.polarityCounts.items()
                 if userCounts[self.AVERAGE] is not None}
        print(users)
        ranking = sorted(users.items(), key=lambda x: x[1], reverse=True)
        print(ranking)
        return ranking
if __name__ == "__main__":
    # Tiny smoke test: two mutually positive users plus one negative reply.
    r1 = {"from": 4, "to": 5, "polarity": 1}
    r2 = {"from": 5, "to": 4, "polarity": 1}
    r3 = {"from": 4, "to": 2, "polarity": 0}
    replies = [r1, r2, r3]
    polarityCounter = PolarityCounter(replies)
    print(polarityCounter.polarityCounts)
    polarityCounter.polarityDifferenceRanking()
'''
Created on 4 avr. 2013
@author: Nils Amiet
'''
# coding: utf-8
# Input graphs produced by the graph builder, and the HTML report destination.
graph1Path = "graph1.gml"
graph2Path = "graph2.gml"
rankingsFile = "rankings.html"
import networkx as nx
import networkx.readwrite.gml as gml
import operator
import math
import copy
import os
import sys
import sqlite3
''' Required for running the script from anywhere outside eclipse'''
# Make relative paths resolve against this file and make the graph-builder
# package importable.
os.chdir(os.path.dirname(os.path.realpath(__file__)))
sys.path.append('../../../../../InfrarougeGraphBuilder')
from ch.hearc.infrarouge.graphbuilder.HTMLTableGenerator import HTMLTableGenerator
from ch.hearc.infrarouge.graphbuilder.main import users, polarityReplies, getForums, databasePath
from ch.hearc.infrarouge.graphbuilder.polarity import PolarityCounter

# All user rows from the database; used to resolve user ids to usernames in tables.
sqlUsers = users()
# One average-rank ranking per analyzed graph; appended to by createRanking().
averageRankings = []
def readGraphs():
    """Load both GML graphs from disk and return them as a (g1, g2) tuple."""
    repliesGraph = gml.read_gml(graph1Path, relabel=True)
    participationGraph = gml.read_gml(graph2Path, relabel=True)
    return (repliesGraph, participationGraph)
def getSortedTop(centrality):
    """Return the (node, score) pairs of *centrality* sorted by decreasing score."""
    return sorted(centrality.items(), key=lambda item: item[1], reverse=True)
def writeHTML(filename, headings, rows, readWriteMode='w'):
    """Append (or write) an HTML table for *rows*, with user ids resolved to names."""
    table = HTMLTableGenerator(th=headings, rows=rows, users=sqlUsers)
    with open(filename, readWriteMode) as out:
        out.write(str(table))
def writeRawHTML(filename, headings, rows, readWriteMode='w'):
    """Append (or write) a plain HTML table without the user link/name columns."""
    generator = HTMLTableGenerator(th=headings, rows=rows, users=sqlUsers)
    with open(filename, readWriteMode) as out:
        out.write(str(generator.raw()))
def writeHeader(filename):
    """Overwrite *filename* with the opening HTML boilerplate."""
    generator = HTMLTableGenerator(title="Infrarouge Centrality rankings")
    with open(filename, 'w') as out:
        out.write(str(generator.header()))
def writeFooter(filename):
    """Append the closing HTML boilerplate to *filename*."""
    with open(filename, 'a') as out:
        out.write(str(HTMLTableGenerator().footer()))
def writeH1(filename, text):
    """Append an <h1> heading to *filename*."""
    with open(filename, 'a') as out:
        out.write(HTMLTableGenerator().h1(text))
def createRanking(graph, ev=True, directed=False):
    """Compute centrality rankings for *graph* and append them to the HTML report.

    @param graph: the networkx graph to analyze
    @param ev: also compute eigenvector centrality (skipped for the bipartite graph)
    @param directed: treat the graph as the directed weighted replies graph
        rather than the bipartite user-discussion graph
    """
    # general graph information
    headings = ["#nodes", "#edges", "directed", "weighted", "bipartite"]
    rows = []
    # NOTE(review): directed implies weighted/non-bipartite here because the
    # only two inputs are the weighted replies graph and the bipartite
    # participation graph -- these flags describe those inputs, not graphs in general.
    if directed:
        weighted = True
        bipartite = False
    else:
        weighted = False
        bipartite = True
    rows += [(str(len(graph.nodes())), str(len(graph.edges())), str(directed), str(weighted), str(bipartite))]
    writeRawHTML(rankingsFile, headings, rows, readWriteMode='a')
    # centrality rankings
    degreeCentrality = nx.degree_centrality(graph)
    if directed:
        inDegreeCentrality = nx.in_degree_centrality(graph)
        outDegreeCentrality = nx.out_degree_centrality(graph)
    closenessCentrality = nx.closeness_centrality(graph)
    betweennessCentrality = nx.betweenness_centrality(graph)
    nbNodes = len(graph.nodes())
    topLength = 15  # how many rows each ranking table shows
    degreeRanking = getSortedTop(degreeCentrality)
    if directed:
        inDegreeRanking = getSortedTop(inDegreeCentrality)
        outDegreeRanking = getSortedTop(outDegreeCentrality)
    closenessRanking = getSortedTop(closenessCentrality)
    betweennessRanking = getSortedTop(betweennessCentrality)
    headings = ["user id", "username", "degree centrality"]
    writeHTML(rankingsFile, headings, degreeRanking[:topLength], readWriteMode='a')
    if directed:
        headings = ["user id", "username", "in-degree centrality"]
        writeHTML(rankingsFile, headings, inDegreeRanking[:topLength], readWriteMode='a')
        headings = ["user id", "username", "out-degree centrality"]
        writeHTML(rankingsFile, headings, outDegreeRanking[:topLength], readWriteMode='a')
    headings = ["user id", "username", "closeness centrality"]
    writeHTML(rankingsFile, headings, closenessRanking[:topLength], readWriteMode='a')
    headings = ["user id", "username", "betweenness centrality"]
    writeHTML(rankingsFile, headings, betweennessRanking[:topLength], readWriteMode='a')
    if ev:
        evCentrality = nx.eigenvector_centrality(graph, max_iter=500)
        evRanking = getSortedTop(evCentrality)
        headings = ["user id", "username", "eigenvector centrality"]
        writeHTML(rankingsFile, headings, evRanking[:topLength], readWriteMode='a')
    clusteringCoefficientRanking = computeClusteringCoefficient(graph, directed)
    # clustering coefficient
    headings = ["user id", "username", "clustering coefficient"]
    writeHTML(rankingsFile, headings, clusteringCoefficientRanking[:topLength], readWriteMode='a')
    # fraction of common users
    # (D = degree, C = closeness, B = betweenness, E = eigenvector)
    commonNodesStatsDCBE = []
    commonNodesStatsDE = []
    commonNodesStatsCE = []
    commonNodesStatsBE = []
    commonNodesStatsDC = []
    commonNodesStatsDB = []
    commonNodesStatsBC = []
    commonNodesStatsDCB = []
    fraction = 1
    while fraction <= 20:  # from 1% to 20%
        if ev:
            commonNodesFractionDCBE = computeFractionOfCommonUsers(fraction/100.0, degreeRanking, closenessRanking, betweennessRanking, evRanking)
            commonNodesFractionDE = computeFractionOfCommonUsers(fraction/100.0, degreeRanking, evRanking)
            commonNodesFractionCE = computeFractionOfCommonUsers(fraction/100.0, closenessRanking, evRanking)
            commonNodesFractionBE = computeFractionOfCommonUsers(fraction/100.0, betweennessRanking, evRanking)
            commonNodesStatsDCBE += [(str(fraction) + "%", str(100*commonNodesFractionDCBE) + "%")]
            commonNodesStatsDE += [(str(fraction) + "%", str(100*commonNodesFractionDE) + "%")]
            commonNodesStatsCE += [(str(fraction) + "%", str(100*commonNodesFractionCE) + "%")]
            commonNodesStatsBE += [(str(fraction) + "%", str(100*commonNodesFractionBE) + "%")]
        commonNodesFractionDCB = computeFractionOfCommonUsers(fraction/100.0, degreeRanking, closenessRanking, betweennessRanking)
        commonNodesFractionDC = computeFractionOfCommonUsers(fraction/100.0, degreeRanking, closenessRanking)
        commonNodesFractionDB = computeFractionOfCommonUsers(fraction/100.0, degreeRanking, betweennessRanking)
        commonNodesFractionBC = computeFractionOfCommonUsers(fraction/100.0, betweennessRanking, closenessRanking)
        commonNodesStatsDC += [(str(fraction) + "%", str(100*commonNodesFractionDC) + "%")]
        commonNodesStatsDB += [(str(fraction) + "%", str(100*commonNodesFractionDB) + "%")]
        commonNodesStatsBC += [(str(fraction) + "%", str(100*commonNodesFractionBC) + "%")]
        commonNodesStatsDCB += [(str(fraction) + "%", str(100*commonNodesFractionDCB) + "%")]
        fraction += 1
    if ev:
        headings = ["top proportion", "fraction of common users [DC, CC, BC, EC]"]
        writeRawHTML(rankingsFile, headings, commonNodesStatsDCBE, readWriteMode='a')
    headings = ["top proportion", "fraction of common users [DC, CC, BC]"]
    writeRawHTML(rankingsFile, headings, commonNodesStatsDCB, readWriteMode='a')
    topNumber = "top proportion"
    if ev:
        headingsDE = [topNumber, "fraction of common users [DC, EC]"]
        headingsCE = [topNumber, "fraction of common users [CC, EC]"]
        headingsBE = [topNumber, "fraction of common users [BC, EC]"]
        writeRawHTML(rankingsFile, headingsDE, commonNodesStatsDE, readWriteMode='a')
        writeRawHTML(rankingsFile, headingsCE, commonNodesStatsCE, readWriteMode='a')
        writeRawHTML(rankingsFile, headingsBE, commonNodesStatsBE, readWriteMode='a')
    headingsDC = [topNumber, "fraction of common users [DC, CC]"]
    headingsDB = [topNumber, "fraction of common users [DC, BC]"]
    headingsBC = [topNumber, "fraction of common users [BC, CC]"]
    writeRawHTML(rankingsFile, headingsDC, commonNodesStatsDC, readWriteMode='a')
    writeRawHTML(rankingsFile, headingsDB, commonNodesStatsDB, readWriteMode='a')
    writeRawHTML(rankingsFile, headingsBC, commonNodesStatsBC, readWriteMode='a')
    # top 15 common nodes
    topn = 15
    if ev:
        fraction = topn / float(nbNodes)
        commonNodesTop15Fraction = computeFractionOfCommonUsers(fraction, degreeRanking, closenessRanking, betweennessRanking, evRanking)
        commonNodesTop15Stats = [("Top " + str(topn), str(int(commonNodesTop15Fraction*topn)) + " (" + str(100*commonNodesTop15Fraction) + "%)")]
        topNumber = "top number"
        headings = [topNumber, "Common users [DC, CC, BC, EC] in top " + str(topn)]
        writeRawHTML(rankingsFile, headings, commonNodesTop15Stats, readWriteMode='a')
    # average rank
    topNodes = []
    if ev:
        # compute average of all 4
        rankings = [degreeRanking, closenessRanking, betweennessRanking, evRanking]
    else:
        # compute average of all 3
        rankings = [degreeRanking, closenessRanking, betweennessRanking]
    for ranking in rankings:
        topNodes += [x[0] for x in ranking]
    topNodes = set(topNodes)
    averageRankStats = []
    for node in topNodes:
        averageRank = 0
        for ranking in rankings:
            try:
                rank = [x[0] for x in ranking].index(node)
            except:
                # NOTE(review): when a node is absent from a ranking, `rank`
                # keeps the value from the previous iteration (or is unbound
                # on the first one) -- likely a latent bug, flagged but left
                # unchanged here.
                print("Warning: Node not in ranking!")
            averageRank += rank
        averageRank /= float(len(rankings))
        averageRank += 1  # convert 0-based index to 1-based rank
        averageRankStats += [(node, averageRank)]
    averageRankStats = sorted(averageRankStats, key=operator.itemgetter(1), reverse=False)
    # Record this graph's ranking for the cross-graph conclusion tables.
    global averageRankings
    averageRankings += [averageRankStats]
    headings = ["node id", "username", "average rank"]
    writeHTML(rankingsFile, headings, averageRankStats[:topn], readWriteMode='a')
    # top n% subgraph
    limit = 7
    for topPercent in range(10, 100, 15):
        topNumber = int(nbNodes * topPercent / float(100))
        topNodes = [x[0] for x in averageRankStats][:topNumber]
        topNodesAndNeighbors = copy.deepcopy(topNodes)
        for node in topNodes:
            topNodesAndNeighbors += graph.neighbors(node)
        subGraph = nx.subgraph(graph, topNodes)
        subGraphWithNeighbors = nx.subgraph(graph, topNodesAndNeighbors)
        filename = "graph" + ("1" if directed else "2") + "top" + str(topPercent) + "percent.gml"
        saveGraph(subGraph, filename)
        clusteringCoefficientRanking = computeClusteringCoefficient(subGraph, directed)
        clusteringCoefficientNeighborsRanking = computeClusteringCoefficient(subGraphWithNeighbors, directed)
        # Keep only the top nodes themselves; neighbors served just as context.
        clusteringCoefficientNeighborsRanking = [x for x in clusteringCoefficientNeighborsRanking if x[0] in topNodes]
        headings = ["user id", "username", "clustering coefficient for top" + str(topPercent) + "% subgraph"]
        writeHTML(rankingsFile, headings, clusteringCoefficientRanking[:limit], readWriteMode='a')
        headings = ["user id", "username", "clustering coefficient (with neighbors) for top" + str(topPercent) + "% subgraph"]
        writeHTML(rankingsFile, headings, clusteringCoefficientNeighborsRanking[:limit], readWriteMode='a')
def computeClusteringCoefficient(graph, directed):
    """Return nodes ranked by clustering coefficient, highest first.

    A directed input is first flattened into an undirected copy with the same
    nodes and edges before nx.clustering is applied.
    """
    if directed:
        undirected = nx.Graph()
        undirected.add_nodes_from(graph.nodes())
        undirected.add_edges_from(graph.edges())
        coefficients = nx.clustering(undirected)
    else:
        coefficients = nx.clustering(graph)
    return getSortedTop(coefficients)
def saveGraph(graph, filename):
    """Serialize *graph* to *filename* in GML format."""
    gml.write_gml(graph, filename)
def computeFractionOfCommonUsers(fraction, *rankings):
    """Fraction of nodes shared by the top *fraction* slice of every ranking.

    @param fraction: proportion of each ranking to inspect (0 < fraction <= 1)
    @param rankings: two or more [(node, score)] lists, best node first
    @return: |intersection of the top slices| / slice size
    @raise Exception: if fewer than two rankings are given
    """
    if len(rankings) > 1:
        nbNodes = len(rankings[0])
        # int(): on Python 2 math.ceil returns a float, which is not a valid
        # slice bound; on Python 3 this is a harmless no-op.
        portionSize = int(math.ceil(fraction * nbNodes))
        commonNodes = set([x[0] for x in rankings[0]][:portionSize])
        for ranking in rankings:
            rankingPortion = [x[0] for x in ranking][:portionSize]
            commonNodes = [x for x in commonNodes if x in rankingPortion]
        return len(commonNodes) / float(portionSize)
    raise Exception("expected at least 2 rankings")
def createGeneralRanking():
    """Average each node's rank across all analyzed graphs and append the table.

    Reads the module-level ``averageRankings`` list that createRanking() fills
    (one [(node, average rank)] list per graph).
    """
    generalRanking = {}
    generalRankingNodeOccurences = {}
    for ranking in averageRankings:
        for nodeRankTuple in ranking:
            node = nodeRankTuple[0]
            rank = nodeRankTuple[1]
            # dict.get replaces the old bare try/except counters, which also
            # masked unrelated errors.
            generalRanking[node] = generalRanking.get(node, 0) + rank
            generalRankingNodeOccurences[node] = generalRankingNodeOccurences.get(node, 0) + 1
    finalRanking = []
    for node, rank in generalRanking.items():
        # Average only over the graphs the node actually appeared in.
        averageRank = rank / float(generalRankingNodeOccurences[node])
        finalRanking += [(node, averageRank)]
    finalRanking = sorted(finalRanking, key=operator.itemgetter(1), reverse=False)
    headings = ["node id", "username", "average rank over both graphs"]
    writeHTML(rankingsFile, headings, finalRanking, readWriteMode='a')
def createPolarityRanking():
    """Append highest/lowest polarity-difference tables and print NDI stats."""
    limit = 7
    # polarity ranking
    replies = polarityReplies()
    polarityCounter = PolarityCounter(replies)
    polarityRanking = polarityCounter.polarityDifferenceRanking()
    headings = ["user id", "username", "Greatest polarity difference"]
    writeHTML(rankingsFile, headings, polarityRanking[:limit], readWriteMode='a')
    # Reverse in place so the least polarized users come first for the second table.
    polarityRanking.reverse()
    headings = ["user id", "username", "Lowest polarity difference"]
    writeHTML(rankingsFile, headings, polarityRanking[:limit], readWriteMode='a')
    # Debug output: sanity-check sizes and the overall disagreement index.
    print("users: %s, ranking length: %s" % (len(sqlUsers), len(polarityRanking)))
    print(len(polarityCounter.replies))
    print(len(polarityCounter.edges))
    print("NDI=" + str(polarityCounter.NDI))
def printTime(interval):
    """Print a millisecond interval converted to seconds, minutes, hours and days.

    NOTE(review): each value is the cumulative total (hours = minutes/60, etc.),
    not a days/hours/minutes/seconds breakdown, and these are integer divisions
    on Python 2 but true divisions on Python 3 -- confirm which is intended.
    """
    seconds = interval / 1000
    minutes = seconds / 60
    hours = minutes / 60
    days = hours / 24
    print(days, hours, minutes, seconds)
def polarizationForAllForums():
    """Compute and export the NDI-over-time series of every forum."""
    for forum in getForums():
        forumID = forum["id"]
        computePolarizationOverTime(polarityReplies(forum=forumID), forumID)
def computePolarizationOverTime(replies, forumID):
    """Track NDI growth over 20 equal time slices of a forum's reply history.

    For each cumulative window [first reply, threshold] the NDI and edge
    count are recorded, then the series is dumped as a MATLAB/Octave script.

    @param replies: reply dicts carrying at least a "timestamp"
    @param forumID: forum id used to name the output file ("topic<id>.m")
    """
    timeSortedReplies = sorted(replies, key=lambda x: x["timestamp"])
    first = timeSortedReplies[0]["timestamp"]
    last = timeSortedReplies[-1]["timestamp"]
    interval = last - first
    timeSliceCount = 20
    deltaTimeMillis = interval / timeSliceCount
    ndiVariation = []
    timeThreshold = first + deltaTimeMillis
    while timeThreshold <= last:
        # NDI of the *cumulative* prefix of replies up to the threshold.
        r = repliesBeforeTime(timeSortedReplies, timeThreshold)
        polarityCounter = PolarityCounter(r)
        edgesCount = len(polarityCounter.edges)
        ndiVariation += [(timeThreshold, polarityCounter.NDI, edgesCount)]
        timeThreshold += deltaTimeMillis
    times = [x[0] for x in ndiVariation]
    ndis = [x[1] for x in ndiVariation]
    edgesCounts = [x[2] for x in ndiVariation]
    # print(times)
    # print(ndis)
    # print(edgesCounts)
    filename = "topic" + str(forumID) + ".m"
    writeListsToFile(times, ndis, edgesCounts, filename)
def writeListsToFile(times, ndis, edgesCounts, filename):
    """Write the three data series plus MATLAB/Octave plot commands to *filename*."""
    with open(filename, 'w+') as out:
        out.write("times = " + str(times) + ";\n")
        out.write("ndis = " + str(ndis) + ";\n")
        out.write("edges = " + str(edgesCounts) + ";\n")
        # Ready-to-run plotting commands for MATLAB/Octave.
        out.write("""
figure
plot(times, ndis)
xlabel('Time (timestamp)')
ylabel('Network Disagreement Index (NDI)')
""")
def repliesBeforeTime(replies, time):
    """Return the replies whose timestamp is at or before *time*."""
    selected = []
    for reply in replies:
        if reply["timestamp"] <= time:
            selected.append(reply)
    return selected
def usersWhoSentMultipleMessages(forum=1):
    """Return (author, forum id, count) rows for users with >1 message in *forum*.

    Fix: the forum id is now bound as a query parameter instead of being
    %-interpolated into the SQL string.
    """
    query = """
    SELECT m.fkauthor, f.id, COUNT(*) as count
    FROM threadmessage as m
    INNER JOIN forumthread as t
    ON m.fkforumthread=t.id
    INNER JOIN forum as f
    ON t.fkparentforum=f.id
    WHERE f.id=?
    GROUP BY m.fkauthor
    HAVING count>1
    """
    users = []
    with sqlite3.connect(databasePath) as connection:
        cursor = connection.cursor()
        cursor.execute(query, (forum,))
        for user in cursor:
            users += [user]
        cursor.close()
    return users
def usersWhoParticipateInForum(forum=1):
    """Return (author, forum id, count) rows for every user who posted in *forum*.

    Fix: the forum id is now bound as a query parameter instead of being
    %-interpolated into the SQL string.
    """
    query = """
    SELECT m.fkauthor, f.id, COUNT(*) as count
    FROM threadmessage as m
    INNER JOIN forumthread as t
    ON m.fkforumthread=t.id
    INNER JOIN forum as f
    ON t.fkparentforum=f.id
    WHERE f.id=?
    GROUP BY m.fkauthor
    """
    users = []
    with sqlite3.connect(databasePath) as connection:
        cursor = connection.cursor()
        cursor.execute(query, (forum,))
        for user in cursor:
            users += [user]
        cursor.close()
    return users
def getUserCount():
    """Return the number of rows in the ``user`` table."""
    with sqlite3.connect(databasePath) as connection:
        cursor = connection.cursor()
        cursor.execute("SELECT COUNT(*) as count FROM user")
        # COUNT(*) always yields exactly one row.
        count = cursor.fetchone()[0]
        cursor.close()
    return count
def forumContributionRanking():
    """Rank forums by the share of participants who posted more than once."""
    forums = getForums()
    ranking = []
    for forum in forums:
        forumID = forum["id"]
        usersWhoSentMoreThanOneMessage = usersWhoSentMultipleMessages(forum=forumID)
        users = usersWhoParticipateInForum(forum=forumID)
        gtOne = len(usersWhoSentMoreThanOneMessage)
        total = len(users)
        # Percentage of returning posters among all posters of the forum.
        ratio = gtOne/float(total)*100
        line = (forumID, gtOne, total, str(ratio) + "%")
        ranking += [line]
    # Most engaged forum (highest returning-poster ratio) first.
    ranking = sorted(ranking, key=lambda x: x[1]/float(x[2]), reverse=True)
    headings = ["forum id", "users with sent messages count > 1", "users who sent at least 1 message count", "ratio"]
    writeRawHTML(rankingsFile, headings, ranking, readWriteMode='a')
if __name__ == "__main__":
    # Load the two graphs produced by the graph builder and generate the
    # full HTML centrality/polarity report.
    (g1, g2) = readGraphs()
    print(len(g1.nodes()))
    print(len(g2.nodes()))
    writeHeader(rankingsFile)
    print("-----")
    print("Centrality Rankings for graph 1")
    print("-----")
    writeH1(rankingsFile, "Replies graph centrality measures (directed weighted)")
    createRanking(g1, directed=True)
    print()
    print("-----")
    print("Centrality Rankings for graph 2")
    print("-----")
    writeH1(rankingsFile, "User-Discussion graph centrality measures (bipartite)")
    createRanking(g2, ev=False)  # do not compute Eigenvector centrality
    print()
    print("-----")
    print("Conclusion")
    print("-----")
    writeH1(rankingsFile, "Conclusion")
    createGeneralRanking()
    createPolarityRanking()
    polarizationForAllForums()
    forumContributionRanking()
    writeFooter(rankingsFile)
    print()
    print("Successfully generated HTML report.")
'''
Created on 8 avr. 2013
@author: Nils Amiet
'''
# coding: utf-8
class HTMLTableGenerator:
    """Builds the HTML fragments (tables, headings, page boilerplate) of the report.

    str(generator) renders a table whose first column links to the user's
    Infrarouge profile, followed by an extra resolved-username cell; raw()
    renders the same rows with no user-specific handling.
    """

    # Base URL of a user's public profile page; the user id is appended.
    infrarougeUserURL = "http://www.infrarouge.ch/ir/member-"
    tableBorder = 1

    def __init__(self, th=None, rows=None, title="Untitled page", users=None):
        """
        @param th: column headings
        @param rows: table rows (iterables of cell values)
        @param title: page title used by header()
        @param users: (id, username, ...) tuples used to resolve usernames
        """
        # Fix: the defaults used to be mutable ([]), shared between every
        # instance created without explicit arguments.
        self.th = th if th is not None else []
        self.rows = rows if rows is not None else []
        self.title = title
        self.users = users

    def header(self):
        """Return the page opening: doctype, <html>, <head> with title, <body>."""
        html = "<!DOCTYPE html>"
        html += "\n"
        html += "<html>"
        html += "\n"
        html += "<head>"
        html += "<title>"
        html += str(self.title)
        html += "</title>"
        html += "</head>"
        html += "\n"
        html += "<body>"
        html += "\n"
        return html

    def footer(self):
        """Return the page closing tags."""
        html = "</body>"
        html += "\n"
        html += "</html>"
        return html

    def h1(self, text):
        """Return *text* wrapped in an <h1> element."""
        html = "<h1>" + text + "</h1>"
        return html

    def raw(self):
        """Render headings and rows as a plain bordered table (no user columns)."""
        html = "<table border=\"" + str(self.tableBorder) + "\">"
        html += "\n"
        html += "<tr>"
        for heading in self.th:
            html += "<th>" + str(heading) + "</th>"
        html += "</tr>"
        html += "\n"
        for row in self.rows:
            html += "<tr>"
            for col in row:
                html += "<td>"
                html += str(col)
                html += "</td>"
            html += "</tr>"
            html += "\n"
        html += "</table>"
        html += "\n"
        html += "<br />"
        html += "\n"
        return html

    def _username(self, userID):
        """Resolve a user id to its username, or a placeholder when unknown.

        Fix: explicit lookup instead of a bare except around an indexing
        expression, which also hid unrelated errors.
        """
        if not self.users:
            return "username not found"
        for user in self.users:
            if user[0] == userID:
                return user[1]
        return "username not found"

    def __repr__(self):
        """Render the table with the first column linked and a username cell added."""
        html = "<table border=\"" + str(self.tableBorder) + "\">"
        html += "\n"
        html += "<tr>"
        for heading in self.th:
            html += "<th>" + str(heading) + "</th>"
        html += "</tr>"
        html += "\n"
        for row in self.rows:
            html += "<tr>"
            for colID, col in enumerate(row):
                html += "<td>"
                if colID == 0:
                    # Link the user id cell to the Infrarouge profile page.
                    html += "<a href=\"" + self.infrarougeUserURL + str(col) + "\">"
                    html += str(col)
                    html += "</a>"
                else:
                    html += str(col)
                html += "</td>"
                if colID == 0:
                    # Insert an extra cell with the resolved username.
                    html += "<td>"
                    html += self._username(col)
                    html += "</td>"
            html += "</tr>"
            html += "\n"
        html += "</table>"
        html += "\n"
        html += "<br />"
        html += "\n"
        return html
| Python |
'''
Module which brings history information about files from Mercurial.
@author: Rodrigo Damazio
'''
import re
import subprocess
REVISION_REGEX = re.compile(r'(?P<hash>[0-9a-f]{12}):.*')
def _GetOutputLines(args):
'''
Runs an external process and returns its output as a list of lines.
@param args: the arguments to run
'''
process = subprocess.Popen(args,
stdout=subprocess.PIPE,
universal_newlines = True,
shell = False)
output = process.communicate()[0]
return output.splitlines()
def FillMercurialRevisions(filename, parsed_file):
    '''
    Fills the revs attribute of all strings in the given parsed file with
    a list of revisions that touched the lines corresponding to that string.

    @param filename: the name of the file to get history for
    @param parsed_file: the parsed file to modify
    @raise ValueError: if a line of hg output cannot be parsed
    '''
    # Take output of hg annotate to get revision of each line
    output_lines = _GetOutputLines(['hg', 'annotate', '-c', filename])
    # Create a map of line -> revision (key is list index, line 0 doesn't exist)
    line_revs = ['dummy']
    for line in output_lines:
        rev_match = REVISION_REGEX.match(line)
        if not rev_match:
            # Fix: `raise 'some string'` is invalid (string exceptions were
            # removed in Python 2.6 and raise a TypeError instead of the
            # intended message); raise a real exception.
            raise ValueError('Unexpected line of output from hg: %s' % line)
        rev_hash = rev_match.group('hash')
        line_revs.append(rev_hash)
    # Fix: the loop variable was named `str`, shadowing the builtin.
    for entry in parsed_file.itervalues():
        # Get the lines that correspond to each string
        start_line = entry['startLine']
        end_line = entry['endLine']
        # Get the revisions that touched those lines
        revs = []
        for line_number in range(start_line, end_line + 1):
            revs.append(line_revs[line_number])
        # Merge with any revisions that were already there
        # (for explicit revision specification)
        if 'revs' in entry:
            revs += entry['revs']
        # Assign the revisions to the string
        entry['revs'] = frozenset(revs)
def DoesRevisionSuperceed(filename, rev1, rev2):
    '''
    Tells whether a revision superceeds another.

    This essentially means that the older revision is an ancestor of the newer
    one.
    This also returns True if the two revisions are the same.

    @param filename: the file whose history limits the ancestor search
    @param rev1: the revision that may be superceeding the other
    @param rev2: the revision that may be superceeded
    @return: True if rev1 superceeds rev2 or they're the same
    '''
    if rev1 == rev2:
        return True
    # Ask hg for all ancestors of rev1 (short hashes, one per line), limited
    # to the given file's history; rev2 is superceeded iff it appears there.
    args = ['hg', 'log', '-r', 'ancestors(%s)' % rev1, '--template', '{node|short}\n', filename]
    output_lines = _GetOutputLines(args)
    return rev2 in output_lines
def NewestRevision(filename, rev1, rev2):
    '''
    Returns whichever of two revisions is closest to the head of the repository.
    If neither is an ancestor of the other, either one may be returned.
    @param filename: the file whose history is consulted
    @param rev1: the first revision
    @param rev2: the second revision
    '''
    return rev1 if DoesRevisionSuperceed(filename, rev1, rev2) else rev2
'''
Module which compares language files to the master file and detects
issues.
@author: Rodrigo Damazio
'''
import os
from mytracks.parser import StringsParser
import mytracks.history
class Validator(object):
    '''
    Validates translated strings files against the master ('en') file.

    On construction this parses strings.xml for every language and annotates
    each string with the Mercurial revisions that touched it; Validate() then
    computes which keys are missing on either side and which translations are
    older than their master counterpart.
    '''

    def __init__(self, languages):
        '''
        Builds a strings file validator.
        Params:
        @param languages: a dictionary mapping each language to its corresponding directory
        '''
        self._langs = {}
        self._master = None
        self._language_paths = languages
        parser = StringsParser()
        for lang, lang_dir in languages.iteritems():
            filename = os.path.join(lang_dir, 'strings.xml')
            parsed_file = parser.Parse(filename)
            # Attach the revision history of each string's source lines.
            mytracks.history.FillMercurialRevisions(filename, parsed_file)
            if lang == 'en':
                # English is the master everything else is compared against.
                self._master = parsed_file
            else:
                self._langs[lang] = parsed_file
        self._Reset()

    def Validate(self):
        '''
        Computes whether all the data in the files for the given languages is valid.
        '''
        self._Reset()
        self._ValidateMissingKeys()
        self._ValidateOutdatedKeys()

    def valid(self):
        # True when the last Validate() run found no problems at all.
        return (len(self._missing_in_master) == 0 and
                len(self._missing_in_lang) == 0 and
                len(self._outdated_in_lang) == 0)

    def missing_in_master(self):
        # Map of language -> keys present in that language but not in master.
        return self._missing_in_master

    def missing_in_lang(self):
        # Map of language -> keys present in master but not in that language.
        return self._missing_in_lang

    def outdated_in_lang(self):
        # Map of language -> keys whose translation predates the master's.
        return self._outdated_in_lang

    def _Reset(self):
        # These are maps from language to string name list
        self._missing_in_master = {}
        self._missing_in_lang = {}
        self._outdated_in_lang = {}

    def _ValidateMissingKeys(self):
        '''
        Computes whether there are missing keys on either side.
        '''
        master_keys = frozenset(self._master.iterkeys())
        for lang, file in self._langs.iteritems():
            keys = frozenset(file.iterkeys())
            missing_in_master = keys - master_keys
            missing_in_lang = master_keys - keys
            if len(missing_in_master) > 0:
                self._missing_in_master[lang] = missing_in_master
            if len(missing_in_lang) > 0:
                self._missing_in_lang[lang] = missing_in_lang

    def _ValidateOutdatedKeys(self):
        '''
        Computes whether any of the language keys are outdated with relation to
        the master keys.
        '''
        for lang, file in self._langs.iteritems():
            outdated = []
            for key, lang_str in file.iteritems():
                # BUG FIX: a key present only in the translation used to raise
                # KeyError on self._master[key], aborting the whole run. Such
                # keys are already reported by _ValidateMissingKeys.
                if key not in self._master:
                    continue
                # Get all revisions that touched master and language files for
                # this string.
                master_str = self._master[key]
                master_revs = master_str['revs']
                lang_revs = lang_str['revs']
                if not master_revs or not lang_revs:
                    print('WARNING: No revision for %s in %s' % (key, lang))
                    continue
                master_file = os.path.join(self._language_paths['en'], 'strings.xml')
                lang_file = os.path.join(self._language_paths[lang], 'strings.xml')
                # Assume that the repository has a single head (TODO: check that),
                # and as such there is always one revision which supersedes all
                # others.
                master_rev = reduce(
                    lambda r1, r2: mytracks.history.NewestRevision(master_file, r1, r2),
                    master_revs)
                lang_rev = reduce(
                    lambda r1, r2: mytracks.history.NewestRevision(lang_file, r1, r2),
                    lang_revs)
                # If the master version is newer than the lang version
                if mytracks.history.DoesRevisionSuperceed(lang_file, master_rev, lang_rev):
                    outdated.append(key)
            if len(outdated) > 0:
                self._outdated_in_lang[lang] = outdated
| Python |
#!/usr/bin/python
'''
Entry point for My Tracks i18n tool.
@author: Rodrigo Damazio
'''
import mytracks.files
import mytracks.translate
import mytracks.validate
import sys
def Usage():
    '''Prints command-line usage information and exits with status 1.'''
    # Single-argument print(...) works identically on Python 2 and 3.
    print('Usage: %s <command> [<language> ...]\n' % sys.argv[0])
    print('Commands are:')
    print(' cleanup')
    print(' translate')
    print(' validate')
    sys.exit(1)
def Translate(languages):
    '''
    Asks the user to interactively translate any missing or outdated strings
    from the files for the given languages.
    @param languages: the languages to translate
    '''
    validator = mytracks.validate.Validator(languages)
    validator.Validate()
    missing = validator.missing_in_lang()
    outdated = validator.outdated_in_lang()
    for lang in languages:
        # BUG FIX: the old 'missing[lang] + outdated[lang]' raised KeyError
        # for languages with no problems (the maps only hold languages that
        # have any), and TypeError otherwise (missing values are frozensets,
        # outdated values are lists). Normalize both to lists first.
        untranslated = list(missing.get(lang, [])) + list(outdated.get(lang, []))
        if len(untranslated) == 0:
            continue
        translator = mytracks.translate.Translator(lang)
        translator.Translate(untranslated)
def Validate(languages):
    '''
    Computes and displays errors in the string files for the given languages.
    @param languages: the languages to compute for
    @return: the total number of errors found
    '''
    validator = mytracks.validate.Validator(languages)
    validator.Validate()
    if validator.valid():
        print('All files OK')
        return 0
    error_count = 0
    for lang, missing in validator.missing_in_master().iteritems():
        print('Missing in master, present in %s: %s:' % (lang, str(missing)))
        error_count += len(missing)
    for lang, missing in validator.missing_in_lang().iteritems():
        print('Missing in %s, present in master: %s:' % (lang, str(missing)))
        error_count += len(missing)
    for lang, outdated in validator.outdated_in_lang().iteritems():
        print('Outdated in %s: %s:' % (lang, str(outdated)))
        error_count += len(outdated)
    return error_count
if __name__ == '__main__':
    argv = sys.argv
    argc = len(argv)
    if argc < 2:
        Usage()
    languages = mytracks.files.GetAllLanguageFiles()
    # BUG FIX: was 'argc == 3', which silently skipped filtering when more
    # than one language was given even though usage allows '[<language> ...]'.
    if argc >= 3:
        langs = set(argv[2:])
        if not langs.issubset(languages):
            # Raising a bare string is invalid in modern Python.
            raise ValueError('Language(s) not found')
        # Filter just to the languages specified (the master 'en' is always kept).
        languages = dict((lang, lang_file)
                         for lang, lang_file in languages.iteritems()
                         if lang in langs or lang == 'en')
    cmd = argv[1]
    # BUG FIX: initialize up front -- the 'translate' branch used to leave
    # error_count undefined, so the final print raised NameError.
    error_count = 0
    if cmd == 'translate':
        Translate(languages)
    elif cmd == 'validate':
        error_count = Validate(languages)
    else:
        Usage()
    print('%d errors found.' % error_count)
| Python |
'''
Module for dealing with resource files (but not their contents).
@author: Rodrigo Damazio
'''
import os.path
from glob import glob
import re
# Location of the Android resources relative to the project root.
MYTRACKS_RES_DIR = 'MyTracks/res'
# Resource directory holding the master (English) strings.
ANDROID_MASTER_VALUES = 'values'
# Glob mask matching per-language resource directories.
ANDROID_VALUES_MASK = 'values-*'


def GetMyTracksDir():
    '''
    Returns the directory in which the MyTracks directory is located.

    Walks upward from the current working directory until a directory
    containing MYTRACKS_RES_DIR is found.
    @raise RuntimeError: if the filesystem root is reached without finding it
    '''
    path = os.getcwd()
    while not os.path.isdir(os.path.join(path, MYTRACKS_RES_DIR)):
        parent = os.path.split(path)[0]
        if parent == path:
            # Reached the filesystem root. This check is portable (the old
            # "path == '/'" test never matched on Windows), and raising a
            # bare string is invalid in modern Python.
            raise RuntimeError('Not in My Tracks project')
        # Go up one level.
        path = parent
    return path
def GetAllLanguageFiles():
    '''
    Returns a mapping from all found languages to their respective directories.

    Every 'values-XX' resource directory is treated as language XX; the plain
    'values' directory is the master and is mapped as 'en'.
    @raise RuntimeError: if no language or master directories are found
    '''
    mytracks_path = GetMyTracksDir()
    res_dir = os.path.join(mytracks_path, MYTRACKS_RES_DIR, ANDROID_VALUES_MASK)
    language_dirs = glob(res_dir)
    master_dir = os.path.join(mytracks_path, MYTRACKS_RES_DIR, ANDROID_MASTER_VALUES)
    if len(language_dirs) == 0:
        # Raising a bare string is invalid in modern Python; use a real
        # exception type.
        raise RuntimeError('No languages found!')
    if not os.path.isdir(master_dir):
        raise RuntimeError('Couldn\'t find master file')
    # 'lang_dir' (was 'dir') avoids shadowing the dir builtin.
    language_tuples = [(re.findall(r'.*values-([A-Za-z-]+)', lang_dir)[0], lang_dir)
                       for lang_dir in language_dirs]
    language_tuples.append(('en', master_dir))
    return dict(language_tuples)
| Python |
'''
Module which parses a string XML file.
@author: Rodrigo Damazio
'''
from xml.parsers.expat import ParserCreate
import re
#import xml.etree.ElementTree as ET
class StringsParser(object):
    '''
    Parser for string XML files.
    This object is not thread-safe and should be used for parsing a single file
    at a time, only.
    '''

    # Matches a KEEP_PARENT annotation inside an XML comment, e.g.
    #   KEEP_PARENT name="bla" rev="123456789012"
    # The rev group is an optional 12-digit short hg hash.
    _KEEP_PARENT_REGEX = re.compile(r'\s*KEEP_PARENT\s+'
                                    r'name\s*=\s*[\'"]?(?P<name>[a-z0-9_]+)[\'"]?'
                                    r'(?:\s+rev=[\'"]?(?P<rev>[0-9a-f]{12})[\'"]?)?\s*',
                                    re.MULTILINE | re.DOTALL)

    def Parse(self, file):
        '''
        Parses the given file and returns a dictionary mapping keys to an object
        with attributes for that key, such as the value, start/end line and
        explicit revisions.
        In addition to the standard XML format of the strings file, this parser
        supports an annotation inside comments, in one of these formats:
        <!-- KEEP_PARENT name="bla" -->
        <!-- KEEP_PARENT name="bla" rev="123456789012" -->
        Such an annotation indicates that we're explicitly inheriting from the
        master file (and the optional revision says that this decision is
        compatible with the master file up to that revision).
        @param file: the name of the file to parse
        '''
        self._Reset()
        # Unfortunately expat is the only parser that will give us line numbers.
        self._xml_parser = ParserCreate()
        self._xml_parser.StartElementHandler = self._StartElementHandler
        self._xml_parser.EndElementHandler = self._EndElementHandler
        self._xml_parser.CharacterDataHandler = self._CharacterDataHandler
        self._xml_parser.CommentHandler = self._CommentHandler
        # Open in binary mode (expat consumes raw bytes) and use a context
        # manager so the handle is closed even if parsing raises (the old code
        # leaked the file object on a parse error).
        with open(file, 'rb') as file_obj:
            self._xml_parser.ParseFile(file_obj)
        return self._all_strings

    def _Reset(self):
        # State for the <string> element currently being parsed.
        self._currentString = None
        self._currentStringName = None
        self._currentStringValue = None
        # Maps string name -> {'value', 'startLine', 'endLine', 'revs', ...}.
        self._all_strings = {}

    def _StartElementHandler(self, name, attrs):
        # Only <string name="..."> elements are of interest.
        if name != 'string':
            return
        if 'name' not in attrs:
            return
        assert not self._currentString
        assert not self._currentStringName
        self._currentString = {
            'startLine' : self._xml_parser.CurrentLineNumber,
        }
        # An explicit rev attribute seeds the revision list.
        if 'rev' in attrs:
            self._currentString['revs'] = [attrs['rev']]
        self._currentStringName = attrs['name']
        self._currentStringValue = ''

    def _EndElementHandler(self, name):
        if name != 'string':
            return
        assert self._currentString
        assert self._currentStringName
        self._currentString['value'] = self._currentStringValue
        self._currentString['endLine'] = self._xml_parser.CurrentLineNumber
        self._all_strings[self._currentStringName] = self._currentString
        self._currentString = None
        self._currentStringName = None
        self._currentStringValue = None

    def _CharacterDataHandler(self, data):
        # Accumulate text only while inside a <string> element; expat may
        # deliver the contents in several chunks.
        if not self._currentString:
            return
        self._currentStringValue += data

    def _CommentHandler(self, data):
        keep_parent_match = self._KEEP_PARENT_REGEX.match(data)
        if not keep_parent_match:
            return
        name = keep_parent_match.group('name')
        self._all_strings[name] = {
            'keepParent' : True,
            'startLine' : self._xml_parser.CurrentLineNumber,
            'endLine' : self._xml_parser.CurrentLineNumber
        }
        rev = keep_parent_match.group('rev')
        if rev:
            self._all_strings[name]['revs'] = [rev]
'''
Module which prompts the user for translations and saves them.
TODO: implement
@author: Rodrigo Damazio
'''
class Translator(object):
    '''
    Interactively prompts the user for translations for a single language.
    TODO: implement
    '''

    def __init__(self, language):
        '''
        Creates a translator for the given language code.
        '''
        self._language = language

    def Translate(self, string_names):
        # Placeholder implementation: just show which strings need work.
        # (Single-argument print(...) is identical on Python 2 and 3.)
        print(string_names)
#!/usr/bin/env python
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import os
from StringIO import StringIO
from PIL import Image
import datauri
# Solid black with full alpha, as used for 9-patch border markers.
RGBA_BLACK = (0, 0, 0, 255)

# Sign of n: -1, 0 or +1.
sign_ = lambda n: -1 if n < 0 else (1 if n > 0 else 0)


def find_black_region_(im, sx, sy, ex, ey):
    '''
    Scans a single row or column of pixels for the first run of solid black.

    The scan starts at (sx, sy) and moves toward (ex, ey); the start and end
    must differ in exactly one axis, and the end coordinate itself is not
    examined.

    @param im: an image-like object exposing getpixel((x, y))
    @return: a (start, end) tuple of offsets from the scan origin, where start
        is the first black pixel and end is the first non-black pixel after it
    @raise ValueError: if the scan is diagonal/degenerate, or if no complete
        black run was found in the scanned range
    '''
    dx = sign_(ex - sx)
    dy = sign_(ey - sy)
    if abs(dx) == abs(dy):
        # Raising a bare string is invalid in modern Python (the old message
        # also referred to a nonexistent 'findRegion_').
        raise ValueError(
            'find_black_region_ can\'t look both horizontally and vertically at once.')
    pixel_changes = []
    pixel_on = False
    x = sx
    y = sy
    while True:
        # Record each black<->non-black transition along the scan line.
        if not pixel_on and im.getpixel((x, y)) == RGBA_BLACK:
            pixel_changes.append((x, y))
            pixel_on = True
        elif pixel_on and im.getpixel((x, y)) != RGBA_BLACK:
            pixel_changes.append((x, y))
            pixel_on = False
        x += dx
        y += dy
        if x == ex and y == ey:
            break
    if len(pixel_changes) < 2:
        # The old code fell through to an opaque IndexError here.
        raise ValueError('No complete black region found in scanned range.')
    return (pixel_changes[0][0 if dx else 1] - (sx if dx else sy),
            pixel_changes[1][0 if dx else 1] - (sx if dx else sy))
def image_to_data_uri_(im):
    '''Serializes the image as PNG and returns it as a data: URI string.'''
    buf = StringIO()
    im.save(buf, 'PNG')
    try:
        return datauri.to_data_uri(buf.getvalue(), 'foo.png')
    finally:
        buf.close()
def main():
    '''
    Converts the 9-patch PNG named on the command line into a stencil shape
    XML document, printed to stdout.
    '''
    src_im = Image.open(sys.argv[1])
    w, h = src_im.size
    # Read and parse the 9-patch stretch (top/left border markers) and
    # padding (bottom/right border markers) regions.
    stretch_l, stretch_r = find_black_region_(src_im, 0, 0, w, 0)
    stretch_t, stretch_b = find_black_region_(src_im, 0, 0, 0, h)
    pad_l, pad_r = find_black_region_(src_im, 0, h - 1, w, h - 1)
    pad_t, pad_b = find_black_region_(src_im, w - 1, 0, w - 1, h)
    template_params = {
        'id': sys.argv[1],
        'icon_uri': image_to_data_uri_(src_im),
        'dim_constraint_attributes': '',  # p:lockHeight="true"
        # The 1-pixel 9-patch marker border is cropped out of the real image;
        # the "- 1" adjustments below compensate for that same border.
        'image_uri': image_to_data_uri_(src_im.crop((1, 1, w - 1, h - 1))),
        'width_l': stretch_l - 1,
        'width_r': w - stretch_r - 1,
        'height_t': stretch_t - 1,
        'height_b': h - stretch_b - 1,
        'pad_l': pad_l - 1,
        'pad_t': pad_t - 1,
        'pad_r': w - pad_r - 1,
        'pad_b': h - pad_b - 1,
    }
    print(open('res/shape_9patch_template.xml').read() % template_params)


if __name__ == '__main__':
    main()
#!/usr/bin/env python
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import os
from StringIO import StringIO
from PIL import Image
import datauri
def image_to_data_uri_(im):
    '''Serializes the image as PNG and returns it as a data: URI string.'''
    png_buffer = StringIO()
    im.save(png_buffer, 'PNG')
    try:
        return datauri.to_data_uri(png_buffer.getvalue(), 'foo.png')
    finally:
        png_buffer.close()
def main():
    '''
    Converts the plain PNG named on the command line into a stencil shape XML
    document, printed to stdout.
    '''
    src_im = Image.open(sys.argv[1])
    w, h = src_im.size
    template_params = {
        'id': sys.argv[1],
        'image_uri': image_to_data_uri_(src_im),
        # The icon is the same image as the shape itself.
        'icon_uri': image_to_data_uri_(src_im),
        'width': w,
        'height': h,
    }
    print(open('res/shape_png_template.xml').read() % template_params)


if __name__ == '__main__':
    main()
#!/usr/bin/env python
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import os
import os.path
import shutil
import zipfile
def main():
    '''
    Packages a stencil into dist/stencil-<id>.zip.

    Command-line arguments: <id> <displayname> <description>. The zip contains
    Definition.xml (the stencil template filled in with the concatenation of
    all shape XML files under res/sets/<id>/shapes) plus every PNG under
    res/sets/<id>/icons.
    '''
    params = {}
    params['id'] = sys.argv[1]
    params['displayname'] = sys.argv[2]
    params['description'] = sys.argv[3]
    zip_file = zipfile.ZipFile('dist/stencil-%s.zip' % params['id'], 'w',
                               zipfile.ZIP_DEFLATED)
    try:
        # Concatenate all shape XML fragments for this stencil. Context
        # managers close each handle promptly (the old code leaked them).
        shapes_xml = ''
        shapes_folder = 'res/sets/%s/shapes' % params['id']
        for shape_file in os.listdir(shapes_folder):
            if not shape_file.endswith('.xml'):
                continue
            with open(os.path.join(shapes_folder, shape_file)) as shape_fp:
                shapes_xml += shape_fp.read()
        params['shapes'] = shapes_xml
        with open('res/stencil_template.xml') as template_fp:
            final_xml = template_fp.read() % params
        zip_file.writestr('Definition.xml', final_xml)
        # Copy the stencil's icons into the zip.
        icons_folder = 'res/sets/%s/icons' % params['id']
        for icon_file in os.listdir(icons_folder):
            if not icon_file.endswith('.png'):
                continue
            with open(os.path.join(icons_folder, icon_file), 'rb') as icon_fp:
                zip_file.writestr('icons/%s' % icon_file, icon_fp.read())
    finally:
        zip_file.close()


if __name__ == '__main__':
    main()
#!/usr/bin/env python
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import sys
import mimetypes
def to_data_uri(data, file_name):
    '''Encodes raw file contents as an RFC 2397 data: URI string.

    @param data: the raw file contents (bytes), not a file object
    @param file_name: the file name, used only to guess the MIME type
    '''
    # BUG FIX: guess_type returns (None, None) for unknown extensions, which
    # used to produce a bogus "data:None;base64,..." URI.
    mime_type = mimetypes.guess_type(file_name)[0] or 'application/octet-stream'
    # b64encode returns bytes on Python 3; decoding keeps the %s substitution
    # clean on both Python 2 and 3 (ASCII is always sufficient for base64).
    encoded = base64.b64encode(data).decode('ascii')
    return 'data:%(mimetype)s;base64,%(data)s' % dict(mimetype=mime_type,
                                                      data=encoded)
def main():
    '''Prints the data: URI for the file named on the command line.'''
    path = sys.argv[1]
    print(to_data_uri(open(path, 'rb').read(), path))


if __name__ == '__main__':
    main()
# -*- coding: UTF-8 -*-
import datetime
import hashlib
import logging
import re
from google.appengine.api import mail
from google.appengine.api.labs import taskqueue
from google.appengine.ext import deferred
from notifiy import constants
from notifiy import model
from notifiy import templates
from notifiy import util
def send_message(pwp, modified_by, title, wave_id, wavelet_id, blip_id, message, extra=None):
    '''Emails a wave notification to one participant.

    Looks up the participant's preferences, renders the email body, and
    enqueues the actual send on the 'send-email' task queue via deferred.defer.
    The task name embeds an MD5 of title+message, so identical notifications
    are de-duplicated by the queue; passing a truthy 'extra' mixes the current
    time into the hash, forcing the task to be unique.

    @param pwp: the participant's per-wave preferences (presumably a
        ParticipantWavePreferences entity -- confirm with model module)
    @param modified_by: participant who triggered the notification; used as
        the display part of the From address
    @param title: wave title, used as the email subject
    @param message: the notification text; nothing is sent when empty
    @param extra: when set, disables task-name de-duplication (see above)
    '''
    if not message: return
    pp = model.ParticipantPreferences.get_by_pk(pwp.participant)
    # Participant never configured an email address: nothing to send.
    if not pp.email: return
    wave_url = util.get_url(pwp.participant, wave_id)
    prefs_url = util.get_url(pwp.participant, pp.preferences_wave_id)
    unsuscribe_email = 'remove-%s@%s.appspotmail.com' % (util.modified_b64encode(pwp.participant), constants.ROBOT_ID)
    body = templates.MESSAGE_TEMPLATE % (message, wave_url, prefs_url, unsuscribe_email)
    # Hash title+message so repeated identical notifications share a task name
    # and are dropped by the queue.
    m = hashlib.md5()
    m.update(unicode(title).encode("UTF-8"))
    m.update(unicode(message).encode("UTF-8"))
    if extra:
        m.update(str(datetime.datetime.now()))
    text_hash = m.hexdigest()
    # Task names may only contain [a-zA-Z0-9-]; mask every other character.
    name = '%s-%s-%s' % (wave_id, pp.email, text_hash)
    name = re.compile('[^a-zA-Z0-9-]').sub('X', name)
    try:
        deferred.defer(post,
                       participant=pwp.participant,
                       mail_from='%s <%s>' % (modified_by, constants.ROBOT_EMAIL),
                       mail_to=pp.email,
                       subject=title,
                       wave_id=wave_id,
                       wavelet_id=wavelet_id,
                       blip_id=blip_id,
                       body=body,
                       _queue='send-email',
                       _name=name)
    except (taskqueue.TombstonedTaskError, taskqueue.TaskAlreadyExistsError), e:
        # A task with this name already ran or is queued: this is the
        # de-duplication working as intended, not a failure.
        logging.warn('Repeated email %s', e)
def post(participant, mail_from, mail_to, subject, wave_id, wavelet_id, blip_id, body):
    '''Actually sends one notification email (executed from the task queue).

    The reply-to address encodes the participant/wave/wavelet/blip so that a
    mailed-in reply can be routed back to the originating blip.
    '''
    encoded = tuple(util.modified_b64encode(value)
                    for value in (participant, wave_id, wavelet_id, blip_id))
    reply_to = '%s <%s.%s.%s.%s@%s.appspotmail.com>' % (
        (constants.ROBOT_NAME.title(),) + encoded + (constants.ROBOT_ID,))
    logging.debug('emailing %s "%s"', mail_to, subject)
    mail.send_mail(mail_from, mail_to, '[wave] %s' % subject, body,
                   reply_to=reply_to)
| Python |
# -*- coding: UTF-8 -*-
from google.appengine.ext import deferred
from notifiy import email
from notifiy import model
from notifiy import phone
from notifiy import templates
def notify_created(wavelet, blip, modified_by):
    """Sends a created notification to all participants except the modified_by"""
    for participant in wavelet.participants:
        if participant != modified_by:
            notify_participant(participant, wavelet, modified_by, blip, blip.text)
def notify_submitted(wavelet, blip, modified_by, message=None):
    """Sends a submitted notification to all participants except the modified_by"""
    # Fall back to the blip text, then to a placeholder (hoisted out of the
    # loop; the value is the same for every participant).
    text = message or (blip and blip.text) or '[no content]'
    for participant in wavelet.participants:
        if participant == modified_by:
            continue
        notify_participant(participant, wavelet, modified_by, blip, text)
def notify_removed(wavelet, modified_by):
    """Sends a deleted notification to all participants except the modified_by"""
    for participant in wavelet.participants:
        if participant != modified_by:
            notify_participant(participant, wavelet, modified_by,
                               wavelet.root_blip, templates.CONTENT_DELETED)
def notify_participant(participant, wavelet, modified_by, blip, message):
    '''Queues delivery of one notification on the notify-participant queue.'''
    blip_id = (blip and blip.blip_id) or ''
    deferred.defer(notify_participant_deferred,
                   participant=participant,
                   modified_by=modified_by,
                   title=wavelet.title,
                   wave_id=wavelet.wave_id,
                   wavelet_id=wavelet.wavelet_id,
                   blip_id=blip_id,
                   message=message,
                   _queue='notify-participant')
def notify_participant_deferred(participant, modified_by, title, wave_id, wavelet_id, blip_id, message):
    """Sends a notification to the participant.

    Honors the participant's global and per-wave preferences; in NOTIFY_ONCE
    mode only the first change after a visit is reported.
    """
    pp = model.ParticipantPreferences.get_by_pk(participant)
    if not pp or not pp.notify: return
    pwp = model.ParticipantWavePreferences.get_by_pk(participant, wave_id)
    if not pwp or pwp.notify_type == model.NOTIFY_NONE: return
    if pwp.notify_type == model.NOTIFY_ONCE:
        # Notify only once until the participant visits the wave again.
        if not pwp.visited: return
        message = templates.NOTIFY_ONCE_TEMPLATE % message
        pwp.visited = False
        # BUG FIX: persist the cleared flag -- without put() the change was
        # lost and every subsequent edit re-notified the participant.
        pwp.put()
    email.send_message(pwp, modified_by, title, wave_id, wavelet_id, blip_id, message)
    phone.send_message(pwp, modified_by, title, wave_id, wavelet_id, blip_id, message)
| Python |
# -*- coding: UTF-8 -*-
from google.appengine.ext import db
from google.appengine.ext import deferred
from notifiy import email
from notifiy import gadget
from notifiy import preferences
from notifiy import templates
from notifiy import model
def wavelet_init(wavelet, modified_by):
    """Initialize the wavelet"""
    gadget.gadget_add(wavelet)
    # Initialize every current participant, announcing that the robot joined.
    for current_participant in wavelet.participants:
        participant_wavelet_init(wavelet, current_participant, modified_by,
                                 message=templates.ROBOT_ADDED)
def participant_init(wavelet, participant):
    """Initialize the participant and return it"""
    existing = model.ParticipantPreferences.get_by_pk(participant)
    if existing:
        return existing
    pp = model.ParticipantPreferences.get_by_pk(participant, create=True)
    # Legacy googlewave.com accounts map onto plain gmail addresses.
    if participant.endswith('@googlewave.com'):
        pp.email = '%s@gmail.com' % participant.split('@')[0]
    pp.put()
    preferences.create_preferences_wave(wavelet.robot, participant)
    return pp
def participant_wavelet_init_deferred(wavelet, participant, modified_by, message):
    '''Queues participant_wavelet_init on the participant-wavelet-init queue.'''
    # BUG FIX: this used to defer *itself* (participant_wavelet_init_deferred),
    # which re-enqueued the task endlessly and never ran the real
    # initialization.
    deferred.defer(participant_wavelet_init, wavelet, participant,
                   modified_by, message, _queue='participant-wavelet-init')
def participant_wavelet_init(wavelet, participant, modified_by, message=None):
    """Initialize the participant in the wavelet"""
    wave_id = wavelet.wave_id
    if model.ParticipantWavePreferences.get_by_pk(participant, wave_id):
        # Already initialized for this wave.
        return
    pwp = model.ParticipantWavePreferences.get_by_pk(participant, wave_id, create=True)
    pp = participant_init(wavelet, participant)
    if not pp.notify_initial:
        return
    email.send_message(pwp, modified_by, wavelet.title, wave_id,
                       wavelet.wavelet_id, wavelet.root_blip.blip_id, message)
def wavelet_deinit(wavelet):
    """De-initialize the wavelet"""
    # Undo wavelet_init's gadget insertion.
    gadget.gadget_remove(wavelet)
def participant_deinit(wavelet, participant):
    """De-initialize the participant: removes all stored records and the preferences wave"""
    # Delete every stored preference record for this participant.
    for model_class in (model.ParticipantPreferences,
                        model.ParticipantWavePreferences):
        query = model_class.all()
        query.filter("participant =", participant)
        db.delete(query)
    preferences.delete_preferences_wavelet(wavelet)
| Python |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
from google.appengine.ext import webapp
from notifiy import constants
class Home(webapp.RequestHandler):
    '''Handler for the robot's root URL: redirects to the project home page.'''

    def get(self):
        self.redirect(constants.ROBOT_HOME_PAGE)
| Python |
# -*- coding: UTF-8 -*-
import base64
import urllib
def get_url(participant, wave_id):
    '''Returns the wave-client URL at which the participant can open the wave.

    googlewave.com users get the public client URL; users on other domains get
    the hosted ("/a/<domain>/") client. Returns '' when wave_id is empty.
    '''
    # Compatibility shim: quote lives in urllib on Python 2 and in
    # urllib.parse on Python 3.
    try:
        quote = urllib.quote
    except AttributeError:
        from urllib.parse import quote
    domain = participant.split('@')[1]
    if wave_id:
        # The wave client expects the id double-escaped in the URL fragment.
        wave_id = quote(quote(wave_id))
    if wave_id and domain == 'googlewave.com':
        return 'https://wave.google.com/wave/#restored:wave:%s' % wave_id
    elif wave_id:
        # BUG FIX: the arguments were swapped, producing URLs of the form
        # /a/<wave_id>/#restored:wave:<domain>.
        return 'https://wave.google.com/a/%s/#restored:wave:%s' % (domain, wave_id)
    else:
        return ''
def modified_b64encode(s):
    '''URL-safe base64-encodes a string, stripping the '=' padding.

    Text input is UTF-8 encoded first; byte strings are encoded as-is.
    '''
    # BUG FIX: the original called s.decode('UTF-8') on unicode input, which
    # (via Python 2's implicit ASCII step) blew up on any non-ASCII text;
    # encode is the correct direction. isinstance(bytes) keeps the same
    # "only convert text" behavior on both Python 2 and 3.
    if not isinstance(s, bytes):
        s = s.encode('UTF-8')
    encoded = base64.urlsafe_b64encode(s)
    if not isinstance(encoded, str):
        # Python 3 returns bytes; base64 output is always ASCII.
        encoded = encoded.decode('ascii')
    return encoded.replace('=', '')
def modified_b64decode(s):
    '''Decodes a string produced by modified_b64encode back to text.

    Restores the stripped '=' padding, URL-safe base64-decodes, and interprets
    the resulting bytes as UTF-8.
    '''
    while len(s) % 4 != 0:
        s = s + '='
    decoded = base64.urlsafe_b64decode(s)
    # BUG FIX: the original called .encode('UTF-8') on the decoded bytes,
    # which on Python 2 implicitly ASCII-decoded first and broke on any
    # non-ASCII payload; decode is the correct direction.
    return decoded.decode('UTF-8')
def process_body(body):
    '''Strips quoted reply text from an email body.

    Lines are buffered per paragraph (runs separated by blank lines); a line
    starting with '>' discards the current paragraph, dropping both the quoted
    text and the "On ... wrote:" attribution line that introduced it.
    '''
    new_body = []
    content_buffer = []
    for line in body.split('\n'):
        if not line:
            # Blank line: flush the accumulated paragraph.
            new_body = new_body + content_buffer + [line]
            content_buffer = []
        elif line.lstrip().startswith('>'):
            # BUG FIX: the old line.strip()[0] check raised IndexError on
            # whitespace-only lines; lstrip().startswith is equivalent for the
            # quote test and treats such lines as ordinary content.
            content_buffer = []
        else:
            content_buffer.append(line)
    new_body = new_body + content_buffer
    return '\n'.join(new_body).strip()
def fetch_wavelet(wave_id, wavelet_id, participant):
    '''Fetches the wavelet using a robot bound to the participant's domain.'''
    from notifiy.robot import create_robot
    domain = participant.split('@')[1]
    robot = create_robot(run=False, domain=domain)
    # TODO: pass participant through once fetch_wavelet supports it:
    # robot.fetch_wavelet(wave_id, wavelet_id, participant)
    return robot.fetch_wavelet(wave_id, wavelet_id)
def reply_wavelet(wave_id, wavelet_id, blip_id, participant, message):
    '''Posts the participant's message as a reply on the given wave.

    Replies under the given blip when it still exists, otherwise at the
    wavelet level, then notifies the other participants.
    '''
    wavelet = fetch_wavelet(wave_id, wavelet_id, participant)
    body = '%s: %s' % (participant, message)  # TODO remove when proxy_for works
    if blip_id in wavelet.blips:
        blip = wavelet.blips[blip_id].reply()
        blip.append(body)
    else:
        blip = wavelet.reply(body)
    wavelet.robot.submit(wavelet)
    from notifiy import notifications
    notifications.notify_submitted(wavelet, blip, participant, message)
| Python |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import urllib
import datetime
from google.appengine.ext import webapp
from google.appengine.ext import deferred
from waveapi import simplejson
from notifiy import model
from notifiy import general
from notifiy import preferences
from notifiy.robot import create_robot
class Process(webapp.RequestHandler):
    '''AJAX endpoint used by the notification gadget.

    The second URL path segment selects the action (one of the method names
    below: status, toggle, offline, online, reset, confirm); the participant
    and wave are passed as query parameters.
    '''

    def get(self):
        # NOTE(review): 'contentType' is not a standard webapp response
        # attribute (headers['Content-Type'] would be) -- confirm this
        # actually sets the header.
        self.response.contentType = 'application/json'
        path = [urllib.unquote(a) for a in self.request.path.split('/')[2:]]
        notification_type = path[0]
        # Unknown actions are silently ignored.
        if not hasattr(self, notification_type): return
        self.participant = self.request.get('participant')
        self.wave_id = self.request.get('wave_id')
        # Dispatch to the handler method named by the URL.
        getattr(self, notification_type)()

    def status(self):
        # Read-only variant of toggle(): report current settings only.
        self.toggle(False)

    def toggle(self, toggle=True):
        # Returns (and when toggle=True, first advances) the participant's
        # per-wave notification settings as JSON.
        pp = model.ParticipantPreferences.get_by_pk(self.participant)
        pwp = model.ParticipantWavePreferences.get_by_pk(self.participant, self.wave_id, create=toggle)
        data = ''
        if pwp:
            if toggle:
                # Cycle to the next notification mode.
                pwp.notify_type = (pwp.notify_type + 1) % model.NOTIFY_TYPE_COUNT
                pwp.put()
            status = pwp.notify_type
            # NOTE(review): email mirrors status exactly -- presumably a
            # placeholder until per-channel settings exist; confirm.
            email = pwp.notify_type
            phones = [ 1 ] # TODO count phones
            if len(phones) == 0:
                phone = -1
            # NOTE(review): the if/else below unconditionally overwrites
            # 'phone', so the -1 "no phones" branch above is dead code --
            # confirm intended once phone counting is implemented.
            if pwp.notify_type != model.NOTIFY_NONE:
                phone = model.NOTIFY_ONCE
            else:
                phone = model.NOTIFY_NONE
            data = simplejson.dumps({ 'status': status,
                                      'email': email,
                                      'phone': phone,
                                      'preferencesWaveId': pp and pp.preferences_wave_id or '' })
        else:
            # No per-wave record (and create not requested): report defaults.
            data = simplejson.dumps({ 'status': 0,
                                      'email': 0,
                                      'phone': 0,
                                      'preferencesWaveId': pp and pp.preferences_wave_id or '' })
        self.response.out.write(data);

    def offline(self):
        # The gadget reports the participant left the wave.
        self.online(False)

    def online(self, online=True):
        # Records the visit time. Going offline marks the wave visited
        # immediately; staying online marks it visited only after 150s of
        # continued presence (via the deferred 'visited' task).
        pwp = model.ParticipantWavePreferences.get_by_pk(self.participant, self.wave_id)
        if pwp:
            pwp.last_visited = datetime.datetime.now()
            pwp.put()
            if not online:
                # NOTE(review): passes the pwp entity itself as last_visited,
                # so visited()'s timestamp comparison can never match --
                # presumably should be pwp.last_visited; confirm.
                visited(pwp.participant, self.wave_id, pwp)
            else:
                deferred.defer(visited, pwp.participant, pwp.wave_id,
                               pwp.last_visited, _queue='visited', _countdown=150)
        self.response.out.write(simplejson.dumps({ 'status': 0 }))

    def reset(self):
        # Recreates the participant's preferences wave from scratch.
        domain = self.participant.split('@')[1]
        robot = create_robot(run=False, domain=domain)
        preferences.create_preferences_wave(robot, self.participant)
        #wavelet = robot.fetch_wavelet(self.wave_id, '%s!root+conv' % domain)
        #general.participant_init(wavelet, self.participant)
        #general.participant_wavelet_init(wavelet, self.participant, self.participant)
        self.response.out.write(simplejson.dumps({ 'status': 0 }))

    def confirm(self):
        # NOTE(review): reads the parameters but does nothing with them --
        # presumably an unfinished email-confirmation endpoint; confirm.
        email = self.request.get('email')
        activation = self.request.get('activation')
def visited(participant, wave_id=None, last_visited=None):
    '''Marks the wave as visited iff no newer visit was recorded since.

    Runs either inline (on going offline) or as a deferred task; the
    last_visited timestamp taken at scheduling time guards against a newer
    visit having superseded this one.
    '''
    if not wave_id:
        return
    pwp = model.ParticipantWavePreferences.get_by_pk(participant, wave_id)
    if pwp.last_visited == last_visited:
        pwp.visited = True
        pwp.put()
| Python |
Subsets and Splits
SQL Console for ajibawa-2023/Python-Code-Large
Provides a useful breakdown of language distribution in the training data, showing which languages have the most samples and helping identify potential imbalances across different language groups.