code stringlengths 1 1.49M | vector listlengths 0 7.38k | snippet listlengths 0 7.38k |
|---|---|---|
#!/usr/bin/python
import os
import subprocess
import sys
BASEDIR = '../main/src/com/joelapenna/foursquare'
TYPESDIR = '../captures/types/v1'
captures = sys.argv[1:]
if not captures:
captures = os.listdir(TYPESDIR)
for f in captures:
basename = f.split('.')[0]
javaname = ''.join([c.capitalize() for c in basename.split('_')])
fullpath = os.path.join(TYPESDIR, f)
typepath = os.path.join(BASEDIR, 'types', javaname + '.java')
parserpath = os.path.join(BASEDIR, 'parsers', javaname + 'Parser.java')
cmd = 'python gen_class.py %s > %s' % (fullpath, typepath)
print cmd
subprocess.call(cmd, stdout=sys.stdout, shell=True)
cmd = 'python gen_parser.py %s > %s' % (fullpath, parserpath)
print cmd
subprocess.call(cmd, stdout=sys.stdout, shell=True)
| [
[
1,
0,
0.1111,
0.037,
0,
0.66,
0,
688,
0,
1,
0,
0,
688,
0,
0
],
[
1,
0,
0.1481,
0.037,
0,
0.66,
0.1429,
394,
0,
1,
0,
0,
394,
0,
0
],
[
1,
0,
0.1852,
0.037,
0,
0.6... | [
"import os",
"import subprocess",
"import sys",
"BASEDIR = '../main/src/com/joelapenna/foursquare'",
"TYPESDIR = '../captures/types/v1'",
"captures = sys.argv[1:]",
"if not captures:\n captures = os.listdir(TYPESDIR)",
" captures = os.listdir(TYPESDIR)",
"for f in captures:\n basename = f.split('... |
#! /usr/bin/env python
import sys, os
import tarfile, zipfile, gzip, bz2
from optparse import OptionParser
"""
Builds packaged releases of DebugKit so I don't have to do things manually.
Excludes itself (build.py), .gitignore, .DS_Store and the .git folder from the archives.
"""
def main():
parser = OptionParser();
parser.add_option('-o', '--output-dir', dest="output_dir",
help="write the packages to DIR", metavar="DIR")
parser.add_option('-p', '--prefix-name', dest="prefix",
help="prefix used for the generated files")
parser.add_option('-k', '--skip', dest="skip", default="",
help="A comma separated list of files to skip")
parser.add_option('-s', '--source-dir', dest="source", default=".",
help="The source directory for the build process")
(options, args) = parser.parse_args()
if options.output_dir == '' or options.output_dir == options.source:
print 'Requires an output dir, and that output dir cannot be the same as the source one!'
exit()
# append .git and build.py to the skip files
skip = options.skip.split(',')
skip.extend(['.git', '.gitignore', '.DS_Store', 'build.py'])
# get list of files in top level dir.
files = os.listdir(options.source)
os.chdir(options.source)
# filter the files, I couldn't figure out how to do it in a more concise way.
for f in files[:]:
try:
skip.index(f)
files.remove(f)
except ValueError:
pass
# make a boring tar file
destfile = ''.join([options.output_dir, options.prefix])
tar_file_name = destfile + '.tar'
tar = tarfile.open(tar_file_name, 'w');
for f in files:
tar.add(f)
tar.close()
print "Generated tar file"
# make the gzip
if make_gzip(tar_file_name, destfile):
print "Generated gzip file"
else:
print "Could not generate gzip file"
# make the bz2
if make_bz2(tar_file_name, destfile):
print "Generated bz2 file"
else:
print "Could not generate bz2 file"
# make the zip file
zip_recursive(destfile + '.zip', options.source, files)
print "Generated zip file\n"
def make_gzip(tar_file, destination):
    """
    Compress *tar_file* with gzip, writing <destination>.tar.gz.

    Returns True on success.  Both file handles are closed even when the
    copy fails (the original leaked them on error).
    """
    source = open(tar_file, 'rb')
    try:
        compressed = gzip.open(destination + '.tar.gz', 'wb')
        try:
            compressed.writelines(source)
        finally:
            compressed.close()
    finally:
        source.close()
    return True
def make_bz2(tar_file, destination):
    """
    Compress *tar_file* with bzip2, writing <destination>.tar.bz2.

    Returns True on success.  Both file handles are closed even when the
    copy fails (the original leaked them on error).
    """
    source = open(tar_file, 'rb')
    try:
        compressed = bz2.BZ2File(destination + '.tar.bz2', 'wb')
        try:
            compressed.writelines(source)
        finally:
            compressed.close()
    finally:
        source.close()
    return True
def zip_recursive(destination, source_dir, rootfiles):
    """
    Recursively zips source_dir into destination.

    rootfiles lists the top-level entries (files and directories) to be
    included; anything else at the top level is omitted.  Returns the
    destination path.
    """
    zipped = zipfile.ZipFile(destination, 'w', zipfile.ZIP_DEFLATED)
    try:
        for root, dirs, files in os.walk(source_dir):
            in_root = (root == source_dir)
            if in_root:
                # Prune unwanted top-level directories in place so os.walk
                # does not descend into them.  The original iterated over
                # `dirs` while removing from it, which skips the entry
                # following each removal and let excluded directories leak
                # into the archive.
                dirs[:] = [d for d in dirs if d in rootfiles]
            for f in files:
                if in_root and f not in rootfiles:
                    continue
                zipped.write(os.path.join(root, f))
    finally:
        zipped.close()
    return destination
# Build the release archives only when run as a script, not on import.
if __name__ == '__main__':
    main()
[
1,
0,
0.0231,
0.0077,
0,
0.66,
0,
509,
0,
2,
0,
0,
509,
0,
0
],
[
1,
0,
0.0308,
0.0077,
0,
0.66,
0.125,
223,
0,
4,
0,
0,
223,
0,
0
],
[
1,
0,
0.0385,
0.0077,
0,
0... | [
"import sys, os",
"import tarfile, zipfile, gzip, bz2",
"from optparse import OptionParser",
"\"\"\"\nBuilds packaged releases of DebugKit so I don't have to do things manually.\n\nExcludes itself (build.py), .gitignore, .DS_Store and the .git folder from the archives.\n\"\"\"",
"def main():\n parser = O... |
#! /usr/bin/env python
import sys, os
import tarfile, zipfile, gzip, bz2
from optparse import OptionParser
"""
Builds packaged releases of DebugKit so I don't have to do things manually.
Excludes itself (build.py), .gitignore, .DS_Store and the .git folder from the archives.
"""
def main():
parser = OptionParser();
parser.add_option('-o', '--output-dir', dest="output_dir",
help="write the packages to DIR", metavar="DIR")
parser.add_option('-p', '--prefix-name', dest="prefix",
help="prefix used for the generated files")
parser.add_option('-k', '--skip', dest="skip", default="",
help="A comma separated list of files to skip")
parser.add_option('-s', '--source-dir', dest="source", default=".",
help="The source directory for the build process")
(options, args) = parser.parse_args()
if options.output_dir == '' or options.output_dir == options.source:
print 'Requires an output dir, and that output dir cannot be the same as the source one!'
exit()
# append .git and build.py to the skip files
skip = options.skip.split(',')
skip.extend(['.git', '.gitignore', '.DS_Store', 'build.py'])
# get list of files in top level dir.
files = os.listdir(options.source)
os.chdir(options.source)
# filter the files, I couldn't figure out how to do it in a more concise way.
for f in files[:]:
try:
skip.index(f)
files.remove(f)
except ValueError:
pass
# make a boring tar file
destfile = ''.join([options.output_dir, options.prefix])
tar_file_name = destfile + '.tar'
tar = tarfile.open(tar_file_name, 'w');
for f in files:
tar.add(f)
tar.close()
print "Generated tar file"
# make the gzip
if make_gzip(tar_file_name, destfile):
print "Generated gzip file"
else:
print "Could not generate gzip file"
# make the bz2
if make_bz2(tar_file_name, destfile):
print "Generated bz2 file"
else:
print "Could not generate bz2 file"
# make the zip file
zip_recursive(destfile + '.zip', options.source, files)
print "Generated zip file\n"
def make_gzip(tar_file, destination):
    """
    Compress *tar_file* with gzip, writing <destination>.tar.gz.

    Returns True on success.  Both file handles are closed even when the
    copy fails (the original leaked them on error).
    """
    source = open(tar_file, 'rb')
    try:
        compressed = gzip.open(destination + '.tar.gz', 'wb')
        try:
            compressed.writelines(source)
        finally:
            compressed.close()
    finally:
        source.close()
    return True
def make_bz2(tar_file, destination):
    """
    Compress *tar_file* with bzip2, writing <destination>.tar.bz2.

    Returns True on success.  Both file handles are closed even when the
    copy fails (the original leaked them on error).
    """
    source = open(tar_file, 'rb')
    try:
        compressed = bz2.BZ2File(destination + '.tar.bz2', 'wb')
        try:
            compressed.writelines(source)
        finally:
            compressed.close()
    finally:
        source.close()
    return True
def zip_recursive(destination, source_dir, rootfiles):
    """
    Recursively zips source_dir into destination.

    rootfiles lists the top-level entries (files and directories) to be
    included; anything else at the top level is omitted.  Returns the
    destination path.
    """
    zipped = zipfile.ZipFile(destination, 'w', zipfile.ZIP_DEFLATED)
    try:
        for root, dirs, files in os.walk(source_dir):
            in_root = (root == source_dir)
            if in_root:
                # Prune unwanted top-level directories in place so os.walk
                # does not descend into them.  The original iterated over
                # `dirs` while removing from it, which skips the entry
                # following each removal and let excluded directories leak
                # into the archive.
                dirs[:] = [d for d in dirs if d in rootfiles]
            for f in files:
                if in_root and f not in rootfiles:
                    continue
                zipped.write(os.path.join(root, f))
    finally:
        zipped.close()
    return destination
# Build the release archives only when run as a script, not on import.
if __name__ == '__main__':
    main()
[
1,
0,
0.0231,
0.0077,
0,
0.66,
0,
509,
0,
2,
0,
0,
509,
0,
0
],
[
1,
0,
0.0308,
0.0077,
0,
0.66,
0.125,
223,
0,
4,
0,
0,
223,
0,
0
],
[
1,
0,
0.0385,
0.0077,
0,
0... | [
"import sys, os",
"import tarfile, zipfile, gzip, bz2",
"from optparse import OptionParser",
"\"\"\"\nBuilds packaged releases of DebugKit so I don't have to do things manually.\n\nExcludes itself (build.py), .gitignore, .DS_Store and the .git folder from the archives.\n\"\"\"",
"def main():\n parser = O... |
#====================================================================
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# ====================================================================
#
# This software consists of voluntary contributions made by many
# individuals on behalf of the Apache Software Foundation. For more
# information on the Apache Software Foundation, please see
# <http://www.apache.org/>.
#
import os
import re
import tempfile
import shutil
# Directory names skipped while walking the tree (VCS/build output).
# NOTE(review): the leading dot in '.svn' is unescaped, so this also
# matches e.g. 'Xsvn'; likewise java_pattern has no trailing '$', so
# 'Foo.javax' matches too -- confirm before tightening.
ignore_pattern = re.compile('^(.svn|target|bin|classes)')
# File names that look like Java sources.
java_pattern = re.compile('^.*\.java')
# Import lines referring to the org.apache.http.annotation package.
annot_pattern = re.compile('import org\.apache\.http\.annotation\.')
def process_dir(dir):
    """Walk *dir* recursively and rewrite every Java source file found."""
    for entry in os.listdir(dir):
        path = os.path.join(dir, entry)
        if os.path.isdir(path):
            # Recurse, but stay out of VCS and build-output directories.
            if not ignore_pattern.match(entry):
                process_dir(path)
        elif java_pattern.match(entry):
            process_source(path)
def process_source(filename):
    """
    Rewrite *filename*, replacing imports of org.apache.http.annotation
    with net.jcip.annotations.  The file is only replaced (via a temp
    file) when at least one line actually changed.
    """
    tmpfd, tmpfile = tempfile.mkstemp()
    try:
        changed = False
        dst = os.fdopen(tmpfd, 'w')
        try:
            src = open(filename)
            try:
                for line in src:
                    if annot_pattern.match(line):
                        changed = True
                        line = line.replace('import org.apache.http.annotation.',
                                            'import net.jcip.annotations.')
                    dst.write(line)
            finally:
                src.close()
        finally:
            dst.close()
        if changed:
            shutil.move(tmpfile, filename)
        else:
            os.remove(tmpfile)
    except Exception:
        # Clean up the temp file, but re-raise instead of silently
        # swallowing the error like the original bare `except:` did --
        # that hid real failures (and even KeyboardInterrupt).
        if os.path.exists(tmpfile):
            os.remove(tmpfile)
        raise
# Kick off the rewrite from the current working directory.
process_dir('.')
| [
[
1,
0,
0.3514,
0.0135,
0,
0.66,
0,
688,
0,
1,
0,
0,
688,
0,
0
],
[
1,
0,
0.3649,
0.0135,
0,
0.66,
0.1111,
540,
0,
1,
0,
0,
540,
0,
0
],
[
1,
0,
0.3784,
0.0135,
0,
... | [
"import os",
"import re",
"import tempfile",
"import shutil",
"ignore_pattern = re.compile('^(.svn|target|bin|classes)')",
"java_pattern = re.compile('^.*\\.java')",
"annot_pattern = re.compile('import org\\.apache\\.http\\.annotation\\.')",
"def process_dir(dir):\n files = os.listdir(dir)\n for... |
"""
Data format classes ("responders") that can be plugged
into model_resource.ModelResource and determine how
the objects of a ModelResource instance are rendered
(e.g. serialized to XML, rendered by templates, ...).
"""
from django.core import serializers
from django.core.handlers.wsgi import STATUS_CODE_TEXT
from django.core.paginator import QuerySetPaginator, InvalidPage
# the correct paginator for Model objects is the QuerySetPaginator,
# not the Paginator! (see Django doc)
from django.core.xheaders import populate_xheaders
from django import forms
from django.http import Http404, HttpResponse
from django.forms.util import ErrorDict
from django.shortcuts import render_to_response
from django.template import loader, RequestContext
from django.utils import simplejson
from django.utils.xmlutils import SimplerXMLGenerator
from django.views.generic.simple import direct_to_template
class SerializeResponder(object):
    """
    Class for all data formats that are possible
    with Django's serializer framework.
    """
    def __init__(self, format, mimetype=None, paginate_by=None, allow_empty=False):
        """
        format:
            may be every format that works with Django's serializer
            framework. By default: xml, python, json, (yaml).
        mimetype:
            if the default None is not changed, any HttpResponse calls
            use settings.DEFAULT_CONTENT_TYPE and settings.DEFAULT_CHARSET
        paginate_by:
            Number of elements per page. Default: All elements.
        allow_empty:
            if True, list() answers an empty first page instead of a
            404 error response.
        """
        self.format = format
        self.mimetype = mimetype
        self.paginate_by = paginate_by
        self.allow_empty = allow_empty
        # Model field names allowed in the output; assigned externally
        # (Collection.__init__ sets it), empty until then.
        self.expose_fields = []
    def render(self, object_list):
        """
        Serializes a queryset to the format specified in
        self.format.
        """
        # Hide unexposed fields
        # NOTE(review): field.serialize lives on the model's shared _meta,
        # so this temporarily mutates class-level state; not safe under
        # concurrent requests -- confirm before reusing in threaded setups.
        hidden_fields = []
        for obj in list(object_list):
            for field in obj._meta.fields:
                if not field.name in self.expose_fields and field.serialize:
                    field.serialize = False
                    hidden_fields.append(field)
        response = serializers.serialize(self.format, object_list)
        # Show unexposed fields again
        for field in hidden_fields:
            field.serialize = True
        return response
    def element(self, request, elem):
        """
        Renders single model objects to HttpResponse.
        """
        return HttpResponse(self.render([elem]), self.mimetype)
    def error(self, request, status_code, error_dict=None):
        """
        Handles errors in a RESTful way.
        - appropriate status code
        - appropriate mimetype
        - human-readable error message
        """
        if not error_dict:
            error_dict = ErrorDict()
        response = HttpResponse(mimetype = self.mimetype)
        # Plain-text body: "<code> <reason phrase>".
        response.write('%d %s' % (status_code, STATUS_CODE_TEXT[status_code]))
        if error_dict:
            response.write('\n\nErrors:\n')
            response.write(error_dict.as_text())
        response.status_code = status_code
        return response
    def list(self, request, queryset, page=None):
        """
        Renders a list of model objects to HttpResponse.
        """
        if self.paginate_by:
            paginator = QuerySetPaginator(queryset, self.paginate_by)
            if not page:
                # Page number may come from the query string (?page=N).
                page = request.GET.get('page', 1)
            try:
                page = int(page)
                object_list = paginator.page(page).object_list
            except (InvalidPage, ValueError):
                # An empty first page is acceptable when allow_empty is
                # set; any other bad page is a 404 error response.
                if page == 1 and self.allow_empty:
                    object_list = []
                else:
                    return self.error(request, 404)
        else:
            object_list = list(queryset)
        return HttpResponse(self.render(object_list), self.mimetype)
class JSONResponder(SerializeResponder):
    """
    JSON data format class.
    """
    def __init__(self, paginate_by=None, allow_empty=False):
        SerializeResponder.__init__(self, 'json', 'application/json',
                                    paginate_by=paginate_by, allow_empty=allow_empty)

    def error(self, request, status_code, error_dict=None):
        """
        Return JSON error response that includes a human readable error
        message, application-specific errors and a machine readable
        status code.
        """
        error_dict = error_dict or ErrorDict()
        response = HttpResponse(mimetype=self.mimetype)
        response.status_code = status_code
        payload = {
            "error-message" : '%d %s' % (status_code, STATUS_CODE_TEXT[status_code]),
            "status-code" : status_code,
            "model-errors" : error_dict.as_ul()
        }
        # Serialize first, then write -- equivalent to simplejson.dump
        # streaming directly into the response.
        response.write(simplejson.dumps(payload))
        return response
class XMLResponder(SerializeResponder):
    """
    XML data format class.
    """
    def __init__(self, paginate_by=None, allow_empty=False):
        SerializeResponder.__init__(self, 'xml', 'application/xml',
                                    paginate_by=paginate_by, allow_empty=allow_empty)

    def error(self, request, status_code, error_dict=None):
        """
        Return XML error response that includes a human readable error
        message, application-specific errors and a machine readable
        status code.
        """
        from django.conf import settings
        error_dict = error_dict or ErrorDict()
        response = HttpResponse(mimetype=self.mimetype)
        response.status_code = status_code
        # Stream the XML document straight into the response body.
        writer = SimplerXMLGenerator(response, settings.DEFAULT_CHARSET)
        writer.startDocument()
        writer.startElement("django-error", {})
        status_text = '%d %s' % (status_code, STATUS_CODE_TEXT[status_code])
        writer.addQuickElement(name="error-message", contents=status_text)
        writer.addQuickElement(name="status-code", contents=str(status_code))
        if error_dict:
            # One element per form field, one entry per error message.
            writer.startElement("model-errors", {})
            for model_field, errors in error_dict.items():
                for message in errors:
                    writer.addQuickElement(name=model_field, contents=message)
            writer.endElement("model-errors")
        writer.endElement("django-error")
        writer.endDocument()
        return response
class TemplateResponder(object):
    """
    Data format class that uses templates (similar to Django's
    generic views).
    """
    def __init__(self, template_dir, paginate_by=None, template_loader=loader,
                 extra_context=None, allow_empty=False, context_processors=None,
                 template_object_name='object', mimetype=None):
        self.template_dir = template_dir
        self.paginate_by = paginate_by
        self.template_loader = template_loader
        if not extra_context:
            extra_context = {}
        # Callables in extra_context are evaluated once here, at
        # construction time -- not per request.
        for key, value in extra_context.items():
            if callable(value):
                extra_context[key] = value()
        self.extra_context = extra_context
        self.allow_empty = allow_empty
        self.context_processors = context_processors
        self.template_object_name = template_object_name
        self.mimetype = mimetype
        self.expose_fields = None # Set by Collection.__init__
    def _hide_unexposed_fields(self, obj, allowed_fields):
        """
        Remove fields from a model that should not be public.
        A field survives when either its name or its '<name>_id'
        foreign-key form is listed in allowed_fields.
        """
        for field in obj._meta.fields:
            if not field.name in allowed_fields and \
               not field.name + '_id' in allowed_fields:
                obj.__dict__.pop(field.name)
    def list(self, request, queryset, page=None):
        """
        Renders a list of model objects to HttpResponse.
        Template name: <template_dir>/<module_name>_list.html
        """
        template_name = '%s/%s_list.html' % (self.template_dir, queryset.model._meta.module_name)
        if self.paginate_by:
            paginator = QuerySetPaginator(queryset, self.paginate_by)
            if not page:
                # Page number may come from the query string (?page=N).
                page = request.GET.get('page', 1)
            try:
                page = int(page)
                object_list = paginator.page(page).object_list
            except (InvalidPage, ValueError):
                if page == 1 and self.allow_empty:
                    object_list = []
                else:
                    raise Http404
            # NOTE(review): if the except branch ran because page was not
            # an int, this second paginator.page(page) call is unguarded
            # and can raise -- confirm intended behavior.
            current_page = paginator.page(page)
            c = RequestContext(request, {
                '%s_list' % self.template_object_name: object_list,
                'is_paginated': paginator.num_pages > 1,
                'results_per_page': self.paginate_by,
                'has_next': current_page.has_next(),
                'has_previous': current_page.has_previous(),
                'page': page,
                'next': page + 1,
                'previous': page - 1,
                'last_on_page': current_page.end_index(),
                'first_on_page': current_page.start_index(),
                'pages': paginator.num_pages,
                'hits' : paginator.count,
            }, self.context_processors)
        else:
            object_list = queryset
            c = RequestContext(request, {
                '%s_list' % self.template_object_name: object_list,
                'is_paginated': False
            }, self.context_processors)
            if not self.allow_empty and len(queryset) == 0:
                raise Http404
        # Hide unexposed fields
        for obj in object_list:
            self._hide_unexposed_fields(obj, self.expose_fields)
        c.update(self.extra_context)
        t = self.template_loader.get_template(template_name)
        return HttpResponse(t.render(c), mimetype=self.mimetype)
    def element(self, request, elem):
        """
        Renders single model objects to HttpResponse.
        Template name: <template_dir>/<module_name>_detail.html
        """
        template_name = '%s/%s_detail.html' % (self.template_dir, elem._meta.module_name)
        t = self.template_loader.get_template(template_name)
        c = RequestContext(request, {
            self.template_object_name : elem,
        }, self.context_processors)
        # Hide unexposed fields
        self._hide_unexposed_fields(elem, self.expose_fields)
        c.update(self.extra_context)
        response = HttpResponse(t.render(c), mimetype=self.mimetype)
        # Attach object identification headers via Django's xheaders helper.
        populate_xheaders(request, response, elem.__class__, getattr(elem, elem._meta.pk.name))
        return response
    def error(self, request, status_code, error_dict=None):
        """
        Renders error template (template name: error status code).
        """
        if not error_dict:
            error_dict = ErrorDict()
        response = direct_to_template(request,
            template = '%s/%s.html' % (self.template_dir, str(status_code)),
            extra_context = { 'errors' : error_dict },
            mimetype = self.mimetype)
        response.status_code = status_code
        return response
    def create_form(self, request, queryset, form_class):
        """
        Render form for creation of new collection entry.
        """
        # NOTE(review): forms.form_for_model is an old (pre-newforms-admin)
        # Django API, removed in later versions -- confirm target version.
        ResourceForm = forms.form_for_model(queryset.model, form=form_class)
        if request.POST:
            form = ResourceForm(request.POST)
        else:
            form = ResourceForm()
        template_name = '%s/%s_form.html' % (self.template_dir, queryset.model._meta.module_name)
        return render_to_response(template_name, {'form':form})
    def update_form(self, request, pk, queryset, form_class):
        """
        Render edit form for single entry.
        """
        # Remove queryset cache by cloning the queryset
        queryset = queryset._clone()
        elem = queryset.get(**{queryset.model._meta.pk.name : pk})
        # NOTE(review): form_for_instance is likewise an old Django API.
        ResourceForm = forms.form_for_instance(elem, form=form_class)
        # request.PUT is populated by resource.load_put_and_files for PUTs.
        if request.PUT:
            form = ResourceForm(request.PUT)
        else:
            form = ResourceForm()
        template_name = '%s/%s_form.html' % (self.template_dir, elem._meta.module_name)
        return render_to_response(template_name,
            {'form':form, 'update':True, self.template_object_name:elem})
| [
[
8,
0,
0.0117,
0.0201,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.0234,
0.0033,
0,
0.66,
0.0625,
913,
0,
1,
0,
0,
913,
0,
0
],
[
1,
0,
0.0268,
0.0033,
0,
0.66... | [
"\"\"\"\nData format classes (\"responders\") that can be plugged \ninto model_resource.ModelResource and determine how\nthe objects of a ModelResource instance are rendered\n(e.g. serialized to XML, rendered by templates, ...).\n\"\"\"",
"from django.core import serializers",
"from django.core.handlers.wsgi im... |
"""
Generic resource class.
"""
from django.utils.translation import ugettext as _
from authentication import NoAuthentication
from django.core.urlresolvers import reverse as _reverse
from django.http import Http404, HttpResponse, HttpResponseNotAllowed
def load_put_and_files(request):
    """
    Populates request.PUT and request.FILES from
    request.raw_post_data. PUT and POST requests differ
    only in REQUEST_METHOD, not in the way data is encoded.
    Therefore we can use Django's POST data retrieval method
    for PUT.
    """
    if request.method == 'PUT':
        # Temporarily masquerade as POST so Django's private
        # _load_post_and_files() parser will run, then restore the
        # real method.
        request.method = 'POST'
        request._load_post_and_files()
        request.method = 'PUT'
        request.PUT = request.POST
        # NOTE(review): dropping the cached _post presumably makes a later
        # request.POST access re-parse (and come up empty) on this PUT
        # request -- confirm against the target Django version.
        del request._post
def reverse(viewname, args=(), kwargs=None):
    """
    Return the URL associated with a view and specified parameters.
    If the regular expression used specifies an optional slash at
    the end of the URL, add the slash.
    """
    url = _reverse(viewname, None, args, kwargs or {})
    # '/?' at the end is the unresolved optional-slash artefact; keep the
    # slash and drop the stray '?'.
    if url.endswith('/?'):
        url = url[:-1]
    return url
class HttpMethodNotAllowed(Exception):
    """
    Signals that request.method was not part of
    the list of permitted methods.
    Caught by the resource __call__ wrappers and turned into a
    405 response.
    """
class ResourceBase(object):
    """
    Base class for both model-based and non-model-based
    resources.
    """
    def __init__(self, authentication=None, permitted_methods=None):
        """
        authentication:
            the authentication instance that checks whether a
            request is authenticated
        permitted_methods:
            the HTTP request methods that are allowed for this
            resource e.g. ('GET', 'PUT')
        """
        # Default to open access and a read-only method set.
        self.authentication = authentication if authentication else NoAuthentication()
        methods = permitted_methods if permitted_methods else ["GET"]
        # Normalize once so later membership tests are case-insensitive.
        self.permitted_methods = [m.upper() for m in methods]

    def dispatch(self, request, target, *args, **kwargs):
        """
        Call the CRUD method on *target* that corresponds to the
        request's HTTP method; raise HttpMethodNotAllowed for methods
        outside permitted_methods.
        """
        method = request.method.upper()
        if method not in self.permitted_methods:
            raise HttpMethodNotAllowed
        if method == 'GET':
            return target.read(request, *args, **kwargs)
        if method == 'POST':
            return target.create(request, *args, **kwargs)
        if method == 'PUT':
            # PUT bodies are form-encoded like POST; parse them first.
            load_put_and_files(request)
            return target.update(request, *args, **kwargs)
        if method == 'DELETE':
            return target.delete(request, *args, **kwargs)
        # Permitted but unrecognized method: treat as not found.
        raise Http404

    def get_url(self):
        """
        Returns resource URL.
        """
        return reverse(self)

    # Default CRUD methods: subclasses override the ones they support;
    # everything else answers 404.
    def create(self, request):
        raise Http404

    def read(self, request):
        raise Http404

    def update(self, request):
        raise Http404

    def delete(self, request):
        raise Http404
class Resource(ResourceBase):
    """
    Generic resource class that can be used for
    resources that are not based on Django models.
    """
    def __init__(self, authentication=None, permitted_methods=None,
                 mimetype=None):
        """
        authentication:
            the authentication instance that checks whether a
            request is authenticated
        permitted_methods:
            the HTTP request methods that are allowed for this
            resource e.g. ('GET', 'PUT')
        mimetype:
            if the default None is not changed, any HttpResponse calls
            use settings.DEFAULT_CONTENT_TYPE and settings.DEFAULT_CHARSET
        """
        ResourceBase.__init__(self, authentication, permitted_methods)
        self.mimetype = mimetype

    def __call__(self, request, *args, **kwargs):
        """
        Redirects to one of the CRUD methods depending
        on the HTTP method of the request. Checks whether
        the requested method is allowed for this resource.
        """
        # Unauthenticated requests get a 401 along with the
        # authentication scheme's challenge headers.
        if not self.authentication.is_authenticated(request):
            denied = HttpResponse(_('Authorization Required'), mimetype=self.mimetype)
            for header, value in self.authentication.challenge_headers().items():
                denied[header] = value
            denied.status_code = 401
            return denied
        try:
            return self.dispatch(request, self, *args, **kwargs)
        except HttpMethodNotAllowed:
            not_allowed = HttpResponseNotAllowed(self.permitted_methods)
            not_allowed.mimetype = self.mimetype
            return not_allowed
| [
[
8,
0,
0.0135,
0.0203,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.027,
0.0068,
0,
0.66,
0.1111,
389,
0,
1,
0,
0,
389,
0,
0
],
[
1,
0,
0.0338,
0.0068,
0,
0.66,... | [
"\"\"\"\nGeneric resource class.\n\"\"\"",
"from django.utils.translation import ugettext as _",
"from authentication import NoAuthentication",
"from django.core.urlresolvers import reverse as _reverse",
"from django.http import Http404, HttpResponse, HttpResponseNotAllowed",
"def load_put_and_files(reque... |
"""
Data format classes that can be plugged into
model_resource.ModelResource and determine how submissions
of model data need to look like (e.g. form submission MIME types,
XML, JSON, ...).
"""
from django.core import serializers
from django.forms import model_to_dict
class InvalidFormData(Exception):
    """
    Raised if form data can not be decoded into key-value
    pairs (undecodable payload, or not exactly one serialized
    object in the request body).
    """
class Receiver(object):
    """
    Base class for all "receiver" data format classes.
    All subclasses need to implement the method
    get_data(self, request, method).
    """
    def get_data(self, request, method):
        """
        Extract submitted data for *method* ('POST' or 'PUT') from
        *request*.  Must be overridden by subclasses.

        Raises NotImplementedError (a subclass of Exception, so existing
        broad handlers keep working) instead of the original generic
        Exception -- the idiomatic signal for an abstract method.
        """
        raise NotImplementedError("Receiver subclass needs to implement get_data!")
    def get_post_data(self, request):
        """Convenience wrapper: data submitted via POST."""
        return self.get_data(request, 'POST')
    def get_put_data(self, request):
        """Convenience wrapper: data submitted via PUT."""
        return self.get_data(request, 'PUT')
class FormReceiver(Receiver):
    """
    Data format class with standard Django behavior:
    POST and PUT data is in form submission format.
    """
    def get_data(self, request, method):
        """Return the request attribute ('POST' or 'PUT') named by *method*."""
        submitted = getattr(request, method)
        return submitted
class SerializeReceiver(Receiver):
    """
    Base class for all data formats possible
    within Django's serializer framework.
    """
    def __init__(self, format):
        self.format = format
    def get_data(self, request, method):
        """
        Deserialize request.raw_post_data (expected to contain exactly
        one serialized object in self.format) and return its field dict.
        Raises InvalidFormData when decoding fails or the payload does
        not hold exactly one object.
        """
        try:
            decoded = serializers.deserialize(self.format, request.raw_post_data)
            objects = list(decoded)
        except serializers.base.DeserializationError:
            raise InvalidFormData
        if len(objects) != 1:
            raise InvalidFormData
        return model_to_dict(objects[0].object)
class JSONReceiver(SerializeReceiver):
    """
    Data format class for form submission in JSON,
    e.g. for web browsers.
    """
    def __init__(self):
        # Delegate to the parent constructor instead of assigning
        # self.format directly, so SerializeReceiver.__init__ logic is
        # never bypassed (the original set the attribute by hand).
        SerializeReceiver.__init__(self, 'json')
class XMLReceiver(SerializeReceiver):
    """
    Data format class for form submission in XML,
    e.g. for software clients.
    """
    def __init__(self):
        # Delegate to the parent constructor instead of assigning
        # self.format directly, so SerializeReceiver.__init__ logic is
        # never bypassed (the original set the attribute by hand).
        SerializeReceiver.__init__(self, 'xml')
| [
[
8,
0,
0.0486,
0.0833,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.0972,
0.0139,
0,
0.66,
0.125,
913,
0,
1,
0,
0,
913,
0,
0
],
[
1,
0,
0.1111,
0.0139,
0,
0.66,... | [
"\"\"\"\nData format classes that can be plugged into \nmodel_resource.ModelResource and determine how submissions\nof model data need to look like (e.g. form submission MIME types,\nXML, JSON, ...).\n\"\"\"",
"from django.core import serializers",
"from django.forms import model_to_dict",
"class InvalidFormD... |
"""
Model-bound resource class.
"""
from django import forms
from django.conf.urls.defaults import patterns
from django.http import *
from django.forms import ModelForm, models
from django.forms.util import ErrorDict
from django.utils.functional import curry
from django.utils.translation.trans_null import _
from resource import ResourceBase, load_put_and_files, reverse, HttpMethodNotAllowed
from receiver import FormReceiver
class InvalidModelData(Exception):
    """
    Raised if create/update fails because the PUT/POST
    data is not appropriate.
    """
    def __init__(self, errors=None):
        # Fall back to an empty ErrorDict so .errors is always dict-like.
        self.errors = errors if errors else ErrorDict()
class Collection(ResourceBase):
    """
    Resource for a collection of models (queryset).
    """
    def __init__(self, queryset, responder, receiver=None, authentication=None,
                 permitted_methods=None, expose_fields=None, entry_class=None,
                 form_class=None):
        """
        queryset:
            determines the subset of objects (of a Django model)
            that make up this resource
        responder:
            the data format instance that creates HttpResponse
            objects from single or multiple model objects and
            renders forms
        receiver:
            the data format instance that handles POST and
            PUT data
        authentication:
            the authentication instance that checks whether a
            request is authenticated
        permitted_methods:
            the HTTP request methods that are allowed for this
            resource e.g. ('GET', 'PUT')
        expose_fields:
            the model fields that can be accessed
            by the HTTP methods described in permitted_methods
        entry_class:
            class used for entries in create() and get_entry();
            default: class Entry (see below)
        form_class:
            base form class used for data validation and
            conversion in self.create() and Entry.update()
        """
        # Available data
        self.queryset = queryset
        # Input format (defaults to ordinary form submissions)
        if not receiver:
            receiver = FormReceiver()
        self.receiver = receiver
        # Input validation
        if not form_class:
            form_class = ModelForm
        self.form_class = form_class
        # Output format / responder setup
        self.responder = responder
        # Expose every model field unless a subset was specified.
        if not expose_fields:
            expose_fields = [field.name for field in queryset.model._meta.fields]
        responder.expose_fields = expose_fields
        # Pre-bind the responder's optional form-rendering helpers to this
        # collection's queryset and form class.
        if hasattr(responder, 'create_form'):
            responder.create_form = curry(responder.create_form, queryset=queryset, form_class=form_class)
        if hasattr(responder, 'update_form'):
            responder.update_form = curry(responder.update_form, queryset=queryset, form_class=form_class)
        # Resource class for individual objects of the collection
        if not entry_class:
            entry_class = Entry
        self.entry_class = entry_class
        ResourceBase.__init__(self, authentication, permitted_methods)
    def __call__(self, request, *args, **kwargs):
        """
        Redirects to one of the CRUD methods depending
        on the HTTP method of the request. Checks whether
        the requested method is allowed for this resource.
        Catches errors.
        """
        # Check authentication
        if not self.authentication.is_authenticated(request):
            response = self.responder.error(request, 401)
            challenge_headers = self.authentication.challenge_headers()
            for k,v in challenge_headers.items():
                response[k] = v
            return response
        # Remove queryset cache
        self.queryset = self.queryset._clone()
        # Determine whether the collection or a specific
        # entry is requested. If not specified as a keyword
        # argument, assume that any args/kwargs are used to
        # select a specific entry from the collection.
        # (NOTE: has_key below, and the except-comma syntax further down,
        # are Python 2 only.)
        if kwargs.has_key('is_entry'):
            is_entry = kwargs.pop('is_entry')
        else:
            eval_args = tuple(x for x in args if x != '' and x != None)
            eval_kwargs = tuple(x for x in kwargs.values()
                                if x != '' and x != None)
            is_entry = bool(eval_args or eval_kwargs)
        # Redirect either to entry method
        # or to collection method. Catch errors.
        try:
            if is_entry:
                entry = self.get_entry(*args, **kwargs)
                return self.dispatch(request, entry)
            else:
                return self.dispatch(request, self)
        except HttpMethodNotAllowed:
            response = self.responder.error(request, 405)
            response['Allow'] = ', '.join(self.permitted_methods)
            return response
        except (self.queryset.model.DoesNotExist, Http404):
            return self.responder.error(request, 404)
        except InvalidModelData, i:
            return self.responder.error(request, 400, i.errors)
        # No other methods allowed: 400 Bad Request
        # NOTE(review): every path above returns or raises, so this final
        # response appears unreachable -- confirm before removing.
        return self.responder.error(request, 400)
    def create(self, request):
        """
        Creates a resource with attributes given by POST, then
        redirects to the resource URI.
        """
        # Create form filled with POST data
        ResourceForm = models.modelform_factory(self.queryset.model, form=self.form_class)
        data = self.receiver.get_post_data(request)
        form = ResourceForm(data)
        # If the data contains no errors, save the model,
        # return a "201 Created" response with the model's
        # URI in the location header and a representation
        # of the model in the response body.
        if form.is_valid():
            new_model = form.save()
            model_entry = self.entry_class(self, new_model)
            response = model_entry.read(request)
            response.status_code = 201
            response['Location'] = model_entry.get_url()
            return response
        # Otherwise return a 400 Bad Request error.
        raise InvalidModelData(form.errors)
    def read(self, request):
        """
        Returns a representation of the queryset.
        The format depends on which responder (e.g. JSONResponder)
        is assigned to this ModelResource instance. Usually called by a
        HTTP request to the factory URI with method GET.
        """
        return self.responder.list(request, self.queryset)
    def get_entry(self, pk_value):
        """
        Returns a single entry retrieved by filtering the
        collection queryset by primary key value.
        """
        model = self.queryset.get(**{self.queryset.model._meta.pk.name : pk_value})
        entry = self.entry_class(self, model)
        return entry
class Entry(object):
    """
    Resource wrapping one single model instance of a Collection.
    """
    def __init__(self, collection, model):
        # Keep a reference to the parent Collection and to the
        # concrete model instance this entry represents.
        self.collection = collection
        self.model = model
    def get_url(self):
        """
        Build the canonical URL of this entry from its primary key.
        """
        pk_field = self.model._meta.pk.name
        return reverse(self.collection, (getattr(self.model, pk_field),))
    def create(self, request):
        # POST against a single entry is not meaningful; treat it as
        # an unknown URL.
        raise Http404
    def read(self, request):
        """
        Render a representation of the wrapped model instance.
        The format depends on the responder (e.g. JSONResponder)
        assigned to the parent collection. Usually triggered by a GET
        request to the entry URI.
        """
        responder = self.collection.responder
        return responder.element(request, self.model)
    def update(self, request):
        """
        Change the wrapped model's attributes from PUT data and answer
        with the updated representation. Usually triggered by a PUT
        request to the entry URI.
        """
        form_class = models.modelform_factory(self.model.__class__, form=self.collection.form_class)
        put_data = self.collection.receiver.get_put_data(request)
        bound_form = form_class(put_data, instance=self.model)
        # Invalid submissions are reported as "400 Bad Request".
        if not bound_form.is_valid():
            raise InvalidModelData(bound_form.errors)
        # Save and answer "200 Ok" with the entry's representation in
        # the body and its URI in the Location header.
        bound_form.save()
        response = self.read(request)
        response.status_code = 200
        response['Location'] = self.get_url()
        return response
    def delete(self, request):
        """
        Delete the wrapped model instance. Usually triggered by a
        DELETE request to the entry URI.
        """
        self.model.delete()
        return HttpResponse(_("Object successfully deleted."), self.collection.responder.mimetype)
| [
[
8,
0,
0.0082,
0.0123,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.0165,
0.0041,
0,
0.66,
0.0833,
294,
0,
1,
0,
0,
294,
0,
0
],
[
1,
0,
0.0206,
0.0041,
0,
0.66... | [
"\"\"\"\nModel-bound resource class.\n\"\"\"",
"from django import forms",
"from django.conf.urls.defaults import patterns",
"from django.http import *",
"from django.forms import ModelForm, models",
"from django.forms.util import ErrorDict",
"from django.utils.functional import curry",
"from django.u... |
# Package version as a (major, minor) tuple.
VERSION = (1, 1)
| [
[
14,
0,
1,
1,
0,
0.66,
0,
557,
0,
0,
0,
0,
0,
8,
0
]
] | [
"VERSION = (1, 1)"
] |
from django.http import HttpResponse
from django.utils.translation import ugettext as _
import hashlib, time, random
def djangouser_auth(username, password):
    """
    Validate a username/password pair against Django's built-in
    django.contrib.auth.models.User table.

    Returns True only when the user exists and the password matches.
    """
    from django.contrib.auth.models import User
    try:
        user = User.objects.get(username=username)
    except User.DoesNotExist:
        # Unknown usernames are simply rejected.
        return False
    return bool(user.check_password(password))
class NoAuthentication(object):
    """
    Pass-through authentication scheme: every request is accepted.
    """
    def is_authenticated(self, request):
        # All callers count as authenticated.
        return True
    def challenge_headers(self):
        # Nothing to challenge with, so no extra headers are needed.
        return {}
class HttpBasicAuthentication(object):
    """
    HTTP/1.0 basic authentication.
    """
    def __init__(self, authfunc=djangouser_auth, realm=_('Restricted Access')):
        """
        authfunc:
            A user-defined function which takes a username and
            password as its first and second arguments respectively
            and returns True if the user is authenticated
        realm:
            An identifier for the authority that is requesting
            authorization
        """
        self.realm = realm
        self.authfunc = authfunc
    def challenge_headers(self):
        """
        Returns the http headers that ask for appropriate
        authorization.
        """
        return {'WWW-Authenticate' : 'Basic realm="%s"' % self.realm}
    def is_authenticated(self, request):
        """
        Checks whether a request comes from an authorized user.

        Returns False for requests without a (well-formed) Basic
        Authorization header; otherwise defers to self.authfunc.
        """
        # 'in' instead of the deprecated dict.has_key().
        if 'HTTP_AUTHORIZATION' not in request.META:
            return False
        (authmeth, auth) = request.META['HTTP_AUTHORIZATION'].split(' ', 1)
        if authmeth.lower() != 'basic':
            return False
        # NOTE(review): str.decode('base64') only exists on Python 2;
        # porting to Python 3 requires base64.b64decode.
        auth = auth.strip().decode('base64')
        username, password = auth.split(':', 1)
        return self.authfunc(username=username, password=password)
def digest_password(realm, username, password):
    """
    Construct the HA1 hash needed for HTTP digest authentication
    (RFC 2617): the MD5 of "username:realm:password".

    The joined string is encoded as UTF-8 before hashing: on Python 3
    hashlib requires bytes (the original str call raised TypeError
    there), and on Python 2 the digest is unchanged for ASCII input.
    """
    credentials = "%s:%s:%s" % (username, realm, password)
    return hashlib.md5(credentials.encode('utf-8')).hexdigest()
class HttpDigestAuthentication(object):
    """
    HTTP/1.1 digest authentication (RFC 2617).
    Uses code from the Python Paste Project (MIT Licence).
    """
    def __init__(self, authfunc, realm=_('Restricted Access')):
        """
        authfunc:
            A user-defined function which takes a username and
            a realm as its first and second arguments respectively
            and returns the combined md5 hash of username,
            authentication realm and password.
        realm:
            An identifier for the authority that is requesting
            authorization
        """
        self.realm = realm
        self.authfunc = authfunc
        # Maps issued nonce -> highest nc (request counter) seen so far.
        self.nonce = {} # prevention of replay attacks
    def get_auth_dict(self, auth_string):
        """
        Splits WWW-Authenticate and HTTP_AUTHORIZATION strings
        into a dictionaries, e.g.
        {
            nonce  : "951abe58eddbb49c1ed77a3a5fb5fc2e"',
            opaque : "34de40e4f2e4f4eda2a3952fd2abab16"',
            realm  : "realm1"',
            qop    : "auth"'
        }
        """
        amap = {}
        for itm in auth_string.split(", "):
            # Split only on the first '=' so values containing '=' survive.
            (k, v) = [s.strip() for s in itm.split("=", 1)]
            amap[k] = v.replace('"', '')
        return amap
    def get_auth_response(self, http_method, fullpath, username, nonce, realm, qop, cnonce, nc):
        """
        Returns the server-computed digest response key.

        http_method:
            The request method, e.g. GET
        username:
            The user to be authenticated
        fullpath:
            The absolute URI to be accessed by the user
        nonce:
            A server-specified data string which should be
            uniquely generated each time a 401 response is made
        realm:
            A string to be displayed to users so they know which
            username and password to use
        qop:
            Indicates the "quality of protection" values supported
            by the server.  The value "auth" indicates authentication.
        cnonce:
            An opaque quoted string value provided by the client
            and used by both client and server to avoid chosen
            plaintext attacks, to provide mutual authentication,
            and to provide some message integrity protection.
        nc:
            Hexadecimal request counter
        """
        # HA1 comes from authfunc (md5 of "user:realm:password"),
        # HA2 covers the method and URI (RFC 2617 section 3.2.2).
        ha1 = self.authfunc(realm, username)
        ha2 = hashlib.md5('%s:%s' % (http_method, fullpath)).hexdigest()
        if qop:
            chk = "%s:%s:%s:%s:%s:%s" % (ha1, nonce, nc, cnonce, qop, ha2)
        else:
            chk = "%s:%s:%s" % (ha1, nonce, ha2)
        # NOTE(review): md5 over a str works on Python 2 only; Python 3
        # would need the input encoded to bytes first.
        computed_response = hashlib.md5(chk).hexdigest()
        return computed_response
    def challenge_headers(self, stale=''):
        """
        Returns the http headers that ask for appropriate
        authorization.
        """
        # Fresh random nonce/opaque values for every challenge.
        nonce  = hashlib.md5(
            "%s:%s" % (time.time(), random.random())).hexdigest()
        opaque = hashlib.md5(
            "%s:%s" % (time.time(), random.random())).hexdigest()
        # Register the nonce so replays can be detected later.
        self.nonce[nonce] = None
        parts = {'realm': self.realm, 'qop': 'auth',
                 'nonce': nonce, 'opaque': opaque }
        if stale:
            parts['stale'] = 'true'
        head = ", ".join(['%s="%s"' % (k, v) for (k, v) in parts.items()])
        return {'WWW-Authenticate':'Digest %s' % head}
    def is_authenticated(self, request):
        """
        Checks whether a request comes from an authorized user.
        """
        # Make sure the request is a valid HttpDigest request
        if not request.META.has_key('HTTP_AUTHORIZATION'):
            return False
        fullpath = request.META['SCRIPT_NAME'] + request.META['PATH_INFO']
        (authmeth, auth) = request.META['HTTP_AUTHORIZATION'].split(" ", 1)
        if authmeth.lower() != 'digest':
            return False
        # Extract auth parameters from request
        amap = self.get_auth_dict(auth)
        try:
            username = amap['username']
            authpath = amap['uri']
            nonce = amap['nonce']
            realm = amap['realm']
            response = amap['response']
            assert authpath.split("?", 1)[0] in fullpath
            assert realm == self.realm
            qop = amap.get('qop', '')
            cnonce = amap.get('cnonce', '')
            nc = amap.get('nc', '00000000')
            if qop:
                assert 'auth' == qop
                assert nonce and nc
        # NOTE(review): bare except deliberately treats any malformed
        # header as "not authenticated", but it also hides coding errors.
        except:
            return False
        # Compute response key
        computed_response = self.get_auth_response(request.method, fullpath, username, nonce, realm, qop, cnonce, nc)
        # Compare server-side key with key from client
        # Prevent replay attacks
        if not computed_response or computed_response != response:
            # Mismatch: invalidate the nonce so it cannot be reused.
            if nonce in self.nonce:
                del self.nonce[nonce]
            return False
        # String comparison of the hex counters; works because nc is a
        # fixed-width 8-digit hex string per RFC 2617.
        pnc = self.nonce.get(nonce,'00000000')
        if nc <= pnc:
            # Counter did not increase: replayed or stale request.
            if nonce in self.nonce:
                del self.nonce[nonce]
            return False # stale = True
        self.nonce[nonce] = nc
        return True
| [
[
1,
0,
0.0047,
0.0047,
0,
0.66,
0,
779,
0,
1,
0,
0,
779,
0,
0
],
[
1,
0,
0.0095,
0.0047,
0,
0.66,
0.1429,
389,
0,
1,
0,
0,
389,
0,
0
],
[
1,
0,
0.0142,
0.0047,
0,
... | [
"from django.http import HttpResponse",
"from django.utils.translation import ugettext as _",
"import hashlib, time, random",
"def djangouser_auth(username, password):\n \"\"\"\n Check username and password against\n django.contrib.auth.models.User\n \"\"\"\n from django.contrib.auth.models imp... |
# Build/installation script for the django_restapi package.
from distutils.core import setup

# Derive the release string ("major.minor") from the package's VERSION tuple.
version = '%s.%s' % __import__('django_restapi').VERSION[:2]

setup(
    name='django-rest',
    version=version,
    packages=['django_restapi'],
    author='Andriy Drozdyuk',
    author_email='drozzy@gmail.com',
)
| [
[
1,
0,
0.1,
0.1,
0,
0.66,
0,
152,
0,
1,
0,
0,
152,
0,
0
],
[
14,
0,
0.3,
0.1,
0,
0.66,
0.5,
623,
4,
0,
0,
0,
0,
0,
1
],
[
8,
0,
0.75,
0.6,
0,
0.66,
1,
234,... | [
"from distutils.core import setup",
"version = '%s.%s' % __import__('django_restapi').VERSION[:2]",
"setup(name='django-rest',\n\t\tversion=version,\n\t\tpackages=['django_restapi'],\n\t\tauthor='Andriy Drozdyuk',\n\t\tauthor_email='drozzy@gmail.com',\n\t)"
] |
# Django settings for the django_restapi test project.
from os.path import realpath
# Development settings: keep debug output enabled.
DEBUG = True
TEMPLATE_DEBUG = True
INSTALLED_APPS = (
    'django.contrib.contenttypes',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django_restapi_tests.polls',
    'django_restapi_tests.people'
)
SITE_ID=1
ROOT_URLCONF = 'django_restapi_tests.urls'
# SQLite database file next to the project directory.
DATABASE_NAME = realpath('testdata')
DATABASE_ENGINE = 'sqlite3'
TEMPLATE_DIRS = 'templates'
MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
#    'django.middleware.common.CommonMiddleware',
#    'django.middleware.doc.XViewMiddleware',
)
| [
[
1,
0,
0.037,
0.037,
0,
0.66,
0,
79,
0,
1,
0,
0,
79,
0,
0
],
[
14,
0,
0.1111,
0.037,
0,
0.66,
0.1111,
309,
1,
0,
0,
0,
0,
4,
0
],
[
14,
0,
0.1481,
0.037,
0,
0.66,
... | [
"from os.path import realpath",
"DEBUG = True",
"TEMPLATE_DEBUG = True",
"INSTALLED_APPS = (\n 'django.contrib.contenttypes',\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.sessions',\n 'django.contrib.sites',\n 'django_restapi_tests.polls',\n 'django_restapi_tests.p... |
#!/usr/bin/env python
# Standard Django management entry point for the test project.
from django.core.management import execute_manager
try:
    import settings # Assumed to be in the same directory.
except ImportError:
    import sys
    sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n(If the file settings.py does indeed exist, it's causing an ImportError somehow.)\n" % __file__)
    sys.exit(1)
if __name__ == "__main__":
    execute_manager(settings)
| [
[
1,
0,
0.1818,
0.0909,
0,
0.66,
0,
879,
0,
1,
0,
0,
879,
0,
0
],
[
7,
0,
0.5,
0.5455,
0,
0.66,
0.5,
0,
0,
1,
0,
0,
0,
0,
2
],
[
1,
1,
0.3636,
0.0909,
1,
0.49,
... | [
"from django.core.management import execute_manager",
"try:\n import settings # Assumed to be in the same directory.\nexcept ImportError:\n import sys\n sys.stderr.write(\"Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\\nYou'll have to run dj... |
from datetime import datetime
from django.db import models
from django.utils.translation import gettext_lazy as _
class Poll(models.Model):
    """A poll question with an associated set of choices."""
    question = models.CharField(max_length=200)
    password = models.CharField(max_length=200)
    pub_date = models.DateTimeField(_('date published'), default=datetime.now)
    class Admin:
        pass
    def __str__(self):
        return self.question
    def get_choice_list(self):
        """Return this poll's choices in a stable, id-based order."""
        ordered = self.choice_set.order_by('id')
        return list(ordered)
    def get_choice_from_num(self, choice_num):
        """Return the choice at 1-based position *choice_num*."""
        # Translate the 1-based position into a 0-based list index.
        index = int(choice_num) - 1
        try:
            return self.get_choice_list()[index]
        except IndexError:
            raise Choice.DoesNotExist
class Choice(models.Model):
    """One selectable answer belonging to a Poll."""
    poll = models.ForeignKey(Poll)
    choice = models.CharField(max_length=200)
    votes = models.IntegerField()
    class Admin:
        pass
    def __str__(self):
        return self.choice
    def get_num(self):
        """Return this choice's 1-based position within its poll."""
        siblings = self.poll.get_choice_list()
        try:
            return siblings.index(self) + 1
        except ValueError:
            raise Choice.DoesNotExist
[
1,
0,
0.0303,
0.0303,
0,
0.66,
0,
426,
0,
1,
0,
0,
426,
0,
0
],
[
1,
0,
0.0606,
0.0303,
0,
0.66,
0.25,
40,
0,
1,
0,
0,
40,
0,
0
],
[
1,
0,
0.0909,
0.0303,
0,
0.66... | [
"from datetime import datetime",
"from django.db import models",
"from django.utils.translation import gettext_lazy as _",
"class Poll(models.Model):\n question = models.CharField(max_length=200)\n password = models.CharField(max_length=200)\n pub_date = models.DateTimeField(_('date published'), defa... |
from binascii import b2a_base64
from datetime import datetime
from django.core import serializers
from django.test import TestCase
from django.utils.functional import curry
from django_restapi.authentication import HttpDigestAuthentication
from django_restapi_tests.examples.authentication import digest_authfunc
from django_restapi_tests.polls.models import Poll
import webbrowser, re
# Template for a client-side Digest Authorization header (RFC 2617),
# filled in with a params dict by the authentication tests below.
DIGEST_AUTH = 'Digest username="%(username)s", realm="%(realm)s", nonce="%(nonce)s", uri="%(fullpath)s", algorithm=MD5, response="%(response)s", qop=%(qop)s, nc=%(nc)s, cnonce="%(cnonce)s"'
# When True, failing response bodies are dumped to /tmp and opened in a browser.
SHOW_ERRORS_IN_BROWSER = False
def show_in_browser(content):
    """
    Debug helper: dump *content* to a fixed temp file and open it in
    the default web browser. Does nothing unless the module flag
    SHOW_ERRORS_IN_BROWSER is set.
    """
    if SHOW_ERRORS_IN_BROWSER:
        f = open("/tmp/djangorest_error", "w")
        # try/finally guarantees the handle is closed even if the
        # write fails (the original leaked it in that case).
        try:
            f.write(content)
        finally:
            f.close()
        webbrowser.open_new("file:///tmp/djangorest_error")
class BasicTest(TestCase):
    """
    End-to-end CRUD tests against the example resources mounted by
    the project's URLconfs, exercised through Django's test client.
    """
    fixtures = ['initial_data.json']
    def setUp(self):
        # The old test client has no put()/delete(); emulate them by
        # overriding REQUEST_METHOD on post()/get().
        self.client.put = curry(self.client.post, REQUEST_METHOD='PUT')
        self.client.delete = curry(self.client.get, REQUEST_METHOD='DELETE')
    def test_basics(self):
        """CRUD cycle against the xml/ and html/ poll resources."""
        for format in ['xml', 'html']:
            # Get list of polls
            url = '/%s/polls/' % format
            response = self.client.get(url)
            self.failUnlessEqual(response.status_code, 200)
            # The 'password' field is not exposed, so its value must
            # never leak into the representation.
            self.failUnlessEqual(response.content.find('secret'), -1)
            # Get list of choices
            url = '/%s/choices/' % format
            response = self.client.get(url)
            self.failUnlessEqual(response.status_code, 200)
            # Second page of choices must exist.
            response = self.client.get(url, {'page' : 2})
            self.failUnlessEqual(response.status_code, 200)
            # Third page must not exist.
            response = self.client.get(url, {'page' : 3})
            self.failUnlessEqual(response.status_code, 404)
            # Try to create poll with insufficient data
            # (needs to fail)
            url = '/%s/polls/' % format
            params = {
                'question' : 'Does this not work?',
            }
            response = self.client.post(url, params)
            self.failUnlessEqual(response.status_code, 400)
            # Create poll
            params = {
                'question' : 'Does this work?',
                'password' : 'secret',
                'pub_date' : '2001-01-01'
            }
            response = self.client.post(url, params)
            self.failUnlessEqual(response.status_code, 201)
            location = response['Location']
            poll_id = int(re.findall("\d+", location)[0])
            # Try to change poll with inappropriate data
            # (needs to fail)
            url = '/%s/polls/%d/' % (format, poll_id)
            params = {
                'question' : 'Yes, it works.',
                'password' : 'newsecret',
                'pub_date' : '2007-07-07-123'
            }
            response = self.client.put(url, params)
            self.failUnlessEqual(response.status_code, 400)
            # Change poll
            url = '/%s/polls/%d/' % (format, poll_id)
            params = {
                'question' : 'Yes, it works.',
                'password' : 'newsecret',
                'pub_date' : '2007-07-07'
            }
            response = self.client.put(url, params)
            self.failUnlessEqual(response.status_code, 200)
            # Read poll
            response = self.client.get(url)
            self.failUnlessEqual(response.status_code, 200)
            self.failUnlessEqual(response.content.find('secret'), -1)
            # Delete poll
            response = self.client.delete(url)
            self.failUnlessEqual(response.status_code, 200)
            # Read choice
            url = '/%s/choices/1/' % format
            response = self.client.get(url)
            self.failUnlessEqual(response.status_code, 200)
            # Try to delete choice (must fail)
            response = self.client.delete(url)
            self.failUnlessEqual(response.status_code, 405)
    def test_urlpatterns(self):
        """Nested custom URL scheme: /json/polls/<id>/choices/<num>/."""
        url = '/json/polls/'
        response = self.client.get(url)
        self.failUnlessEqual(response.status_code, 200)
        self.failUnlessEqual(response.content.find('secret'), -1)
        # Get poll
        url = '/json/polls/1/'
        response = self.client.get(url)
        self.failUnlessEqual(response.status_code, 200)
        self.failUnlessEqual(response.content.find('secret'), -1)
        # Get filtered list of choices
        url = '/json/polls/1/choices/'
        response = self.client.get(url)
        self.failUnlessEqual(len(eval(response.content)), 3)
        self.failUnlessEqual(response.status_code, 200)
        self.failUnlessEqual(response.content.find('secret'), -1)
        # Get choice
        url = '/json/polls/1/choices/1/'
        response = self.client.get(url)
        self.failUnlessEqual(response.status_code, 200)
        self.failUnlessEqual(response.content.find('secret'), -1)
        # Get choice (failure)
        url = '/json/polls/1/choices/12/'
        response = self.client.get(url)
        self.failUnlessEqual(response.status_code, 404)
        self.failUnlessEqual(response.content.find('secret'), -1)
        # Try to create poll with insufficient data
        # (needs to fail)
        url = '/json/polls/'
        params = {
            'question' : 'Does this not work?',
        }
        response = self.client.post(url, params)
        self.failUnlessEqual(response.status_code, 400)
        # Create choice
        url = '/json/polls/1/choices/'
        params = {
            'poll' : 1, # TODO: Should be taken from URL
            'choice' : 'New choice',
            'votes' : 0
        }
        response = self.client.post(url, params)
        self.failUnlessEqual(response.status_code, 201)
        location = response['location']
        poll_id = int(re.findall("\d+", location)[0])
        self.failUnlessEqual(poll_id, 1)
        # Try to update choice with insufficient data (needs to fail)
        url = location[17:]
        # strip the protocol head and base url:
        # only working with paths! (note: bad variable name choice!!!)
        params = {
            'poll' : poll_id,
            'choice' : 'New choice',
            'votes' : 'Should be an integer'
        }
        response = self.client.put(url, params)
        self.failUnlessEqual(response.status_code, 400)
        # Update choice
        params = {
            'poll' : poll_id,
            'choice' : 'New choice',
            'votes' : '712'
        }
        response = self.client.put(url, params)
        self.failIfEqual(response.content.find("712"), -1)
        self.failUnlessEqual(response.status_code, 200)
        # Delete choice
        response = self.client.delete(url)
        self.failUnlessEqual(response.status_code, 200)
    def test_submission(self):
        """Round-trip serialized XML/JSON bodies through the receivers."""
        # XML
        url = '/fullxml/polls/'
        response = self.client.get(url)
        self.failUnlessEqual(response.status_code, 200)
        # Create
        new_poll = Poll(
            question = 'Does XML submission work?',
            password = 'secret',
            pub_date = datetime.now()
        )
        serialized_poll = serializers.serialize('xml', [new_poll])
        serialized_poll = serialized_poll.replace('pk="None"', 'pk="1"') # Is ignored, but needs to be an integer
        response = self.client.post(url, data=serialized_poll, content_type='application/xml')
        self.failUnlessEqual(response.status_code, 201)
        # Normalize the server-assigned pk before comparing round trips.
        response_content = re.sub('pk="\d+"', 'pk="1"', response.content)
        self.failUnlessEqual(serialized_poll, response_content)
        response = self.client.get(url)
        self.failUnlessEqual(response.status_code, 200)
        self.failIfEqual(response_content.find("XML submission"), -1)
        # Update
        url = '/fullxml/polls/1/'
        updated_poll = Poll(
            question = 'New question',
            password = 'new_secret',
            pub_date = datetime.now()
        )
        serialized_poll = serializers.serialize('xml', [updated_poll])
        serialized_poll = serialized_poll.replace('pk="None"', 'pk="1"') # Is ignored, but needs to be an integer
        response = self.client.put(url, data=serialized_poll, content_type='application/xml')
        updated_poll = Poll.objects.get(id=1)
        self.failUnlessEqual(updated_poll.question, "New question")
        self.failUnlessEqual(updated_poll.password, "new_secret")
        # JSON
        url = '/fulljson/polls/'
        response = self.client.get(url)
        self.failUnlessEqual(response.status_code, 200)
        # Create
        new_poll = Poll(
            question = 'Does JSON submission work?',
            password = 'secret',
            pub_date = datetime.now()
        )
        serialized_poll = serializers.serialize('json', [new_poll])
        serialized_poll = serialized_poll.replace('"pk": null', '"pk": 1') # Is ignored, but needs to be an integer
        response = self.client.post(url, data=serialized_poll, content_type='application/json')
        self.failUnlessEqual(response.status_code, 201)
        response_content = re.sub('"pk": \d+,', '"pk": 1,', response.content)
        self.failUnlessEqual(serialized_poll, response_content)
        response = self.client.get(url)
        self.failUnlessEqual(response.status_code, 200)
        self.failIfEqual(response_content.find("JSON submission"), -1)
        # Update
        url = '/fulljson/polls/2/'
        updated_poll = Poll(
            question = 'Another question',
            password = 'another_secret',
            pub_date = datetime.now()
        )
        serialized_poll = serializers.serialize('json', [updated_poll])
        serialized_poll = serialized_poll.replace('"pk": "None"', '"pk": "1"') # Is ignored, but needs to be an integer
        response = self.client.put(url, data=serialized_poll, content_type='application/json')
        updated_poll = Poll.objects.get(id=2)
        self.failUnlessEqual(updated_poll.question, "Another question")
        self.failUnlessEqual(updated_poll.password, "another_secret")
class AuthenticationTest(TestCase):
    """
    Exercises the HTTP Basic and Digest authentication schemes
    against the /basic/ and /digest/ example resources.
    """
    fixtures = ['initial_data.json']
    def get_digest_test_params(self, response, url, auth_helper):
        """
        Extract authentication variables from server response
        e.g. {'nonce': '477be2a405a439cdba5227be89ba0f76', 'qop': 'auth', 'realm': 'realm1', 'opaque': '67d958f952de6bd4c1a88686f1b8a896'}
        and add missing params (method, path, username, cnonce, nc).
        """
        www_auth_response = response['WWW-Authenticate']
        self.failUnlessEqual(www_auth_response[:7].lower(), 'digest ')
        auth_params = auth_helper.get_auth_dict(www_auth_response[7:])
        self.failUnlessEqual(len(auth_params), 4)
        # 'opaque' is not part of the response computation.
        auth_params.pop('opaque')
        auth_params.update({'http_method': 'GET', 'fullpath': url, 'username': 'john', 'cnonce': '12345678', 'nc': '00000001'})
        return auth_params
    def test_basic_authentication(self):
        """401 without/with wrong credentials, 200 with correct ones."""
        # Basic authentication, no password
        url = '/basic/polls/'
        response = self.client.get(url)
        self.failUnlessEqual(response.status_code, 401)
        # Basic authentication, wrong password
        headers = {
            'HTTP_AUTHORIZATION': 'Basic %s' % b2a_base64('rest:somepass')[:-1]
        }
        response = self.client.get(url, **headers)
        self.failUnlessEqual(response.status_code, 401)
        # Basic authentication, right password
        headers = {
            'HTTP_AUTHORIZATION': 'Basic %s' % b2a_base64('rest:rest')[:-1]
        }
        response = self.client.get(url, **headers)
        self.failUnlessEqual(response.status_code, 200)
    def test_digest_authentication(self):
        """Full RFC 2617 challenge/response handshake round trip."""
        # 1) Digest authentication, no password
        url = '/digest/polls/'
        response = self.client.get(url)
        self.failUnlessEqual(response.status_code, 401)
        self.failUnlessEqual(response.has_header('WWW-Authenticate'), True)
        # Set up an auth class in order to avoid duplicate
        # authentication code.
        auth_helper = HttpDigestAuthentication(authfunc=digest_authfunc, realm='realm1')
        # 2) Digest authentication, wrong response (=wrong password)
        auth_params = self.get_digest_test_params(response, url, auth_helper)
        auth_params['response'] = 'wrongresponse'
        headers = {
            'SCRIPT_NAME' : '',
            'HTTP_AUTHORIZATION': DIGEST_AUTH % auth_params
        }
        response = self.client.get(url, **headers)
        self.failUnlessEqual(response.status_code, 401)
        # 3) Digest authentication, right password
        auth_params = self.get_digest_test_params(response, url, auth_helper)
        response = auth_helper.get_auth_response(**auth_params)
        auth_params['response'] = response
        headers = {
            'SCRIPT_NAME' : '',
            'HTTP_AUTHORIZATION': DIGEST_AUTH % auth_params
        }
        response = self.client.get(url, **headers)
        self.failUnlessEqual(response.status_code, 200)
| [
[
1,
0,
0.003,
0.003,
0,
0.66,
0,
984,
0,
1,
0,
0,
984,
0,
0
],
[
1,
0,
0.006,
0.003,
0,
0.66,
0.0769,
426,
0,
1,
0,
0,
426,
0,
0
],
[
1,
0,
0.009,
0.003,
0,
0.66,
... | [
"from binascii import b2a_base64",
"from datetime import datetime",
"from django.core import serializers",
"from django.test import TestCase",
"from django.utils.functional import curry",
"from django_restapi.authentication import HttpDigestAuthentication",
"from django_restapi_tests.examples.authentica... |
from django.conf.urls.defaults import *
from django.contrib import admin
# Root URLconf of the test project: mounts every example URLconf
# side by side (each contributes its own prefixed patterns) plus the
# Django admin.
urlpatterns = patterns('',
    url(r'', include('django_restapi_tests.examples.simple')),
    url(r'', include('django_restapi_tests.examples.basic')),
    url(r'', include('django_restapi_tests.examples.template')),
    url(r'', include('django_restapi_tests.examples.custom_urls')),
    url(r'', include('django_restapi_tests.examples.fixedend_urls')),
    url(r'', include('django_restapi_tests.examples.authentication')),
    url(r'', include('django_restapi_tests.examples.submission')),
    url(r'', include('django_restapi_tests.examples.generic_resource')),
    url(r'^admin/(.*)', admin.site.root)
)
| [
[
1,
0,
0.0714,
0.0714,
0,
0.66,
0,
341,
0,
1,
0,
0,
341,
0,
0
],
[
1,
0,
0.1429,
0.0714,
0,
0.66,
0.5,
302,
0,
1,
0,
0,
302,
0,
0
],
[
14,
0,
0.6429,
0.7857,
0,
0.... | [
"from django.conf.urls.defaults import *",
"from django.contrib import admin",
"urlpatterns = patterns('',\n url(r'', include('django_restapi_tests.examples.simple')),\n url(r'', include('django_restapi_tests.examples.basic')),\n url(r'', include('django_restapi_tests.examples.template')),\n url(r'', in... |
from django.conf.urls.defaults import *
from django_restapi.model_resource import Collection
from django_restapi.responder import *
from django_restapi.receiver import *
from django_restapi_tests.polls.models import Poll
# Poll collection that accepts serialized XML request bodies
# (via XMLReceiver) and answers in XML.
fullxml_poll_resource = Collection(
    queryset = Poll.objects.all(),
    permitted_methods = ('GET', 'POST', 'PUT', 'DELETE'),
    receiver = XMLReceiver(),
    responder = XMLResponder(),
)
# The same collection exposed with JSON input and output.
fulljson_poll_resource = Collection(
    queryset = Poll.objects.all(),
    permitted_methods = ('GET', 'POST', 'PUT', 'DELETE'),
    receiver = JSONReceiver(),
    responder = JSONResponder()
)
urlpatterns = patterns('',
    url(r'^fullxml/polls/(.*?)/?$', fullxml_poll_resource),
    url(r'^fulljson/polls/(.*?)/?$', fulljson_poll_resource)
)
| [
[
1,
0,
0.0435,
0.0435,
0,
0.66,
0,
341,
0,
1,
0,
0,
341,
0,
0
],
[
1,
0,
0.087,
0.0435,
0,
0.66,
0.1429,
272,
0,
1,
0,
0,
272,
0,
0
],
[
1,
0,
0.1304,
0.0435,
0,
0... | [
"from django.conf.urls.defaults import *",
"from django_restapi.model_resource import Collection",
"from django_restapi.responder import *",
"from django_restapi.receiver import *",
"from django_restapi_tests.polls.models import Poll",
"fullxml_poll_resource = Collection(\n queryset = Poll.objects.all(... |
from django.conf.urls.defaults import *
from django_restapi.model_resource import Collection
from django_restapi.responder import *
from django_restapi_tests.polls.models import Poll, Choice
# Paginated XML view of polls with full CRUD; the 'password' field is
# deliberately not in expose_fields.
xml_poll_resource = Collection(
    queryset = Poll.objects.all(),
    permitted_methods = ('GET', 'POST', 'PUT', 'DELETE'),
    expose_fields = ('id', 'question', 'pub_date'),
    responder = XMLResponder(paginate_by = 10)
)
# Read-only XML view of choices, 5 per page.
xml_choice_resource = Collection(
    queryset = Choice.objects.all(),
    permitted_methods = ('GET',),
    expose_fields = ('id', 'poll_id', 'choice'),
    responder = XMLResponder(paginate_by = 5)
)
urlpatterns = patterns('',
    url(r'^xml/polls/(.*?)/?$', xml_poll_resource),
    url(r'^xml/choices/(.*?)/?$', xml_choice_resource)
)
| [
[
1,
0,
0.0417,
0.0417,
0,
0.66,
0,
341,
0,
1,
0,
0,
341,
0,
0
],
[
1,
0,
0.0833,
0.0417,
0,
0.66,
0.1667,
272,
0,
1,
0,
0,
272,
0,
0
],
[
1,
0,
0.125,
0.0417,
0,
0... | [
"from django.conf.urls.defaults import *",
"from django_restapi.model_resource import Collection",
"from django_restapi.responder import *",
"from django_restapi_tests.polls.models import Poll, Choice",
"xml_poll_resource = Collection(\n queryset = Poll.objects.all(),\n permitted_methods = ('GET', 'PO... |
from django.conf.urls.defaults import *
from django.http import HttpResponseRedirect
from django.shortcuts import render_to_response
from django_restapi.resource import Resource
from django_restapi_tests.people.models import *
# Urls for a resource that does not map 1:1
# to Django models.
class FriendshipCollection(Resource):
    """Resource listing all friendships; not backed 1:1 by a model."""
    def read(self, request):
        # Render the full friendship list through the HTML template.
        context = {'friendships': get_friendship_list()}
        return render_to_response('people/friends_list.html', context)
class FriendshipEntry(Resource):
    """Resource for one friendship, addressed by the two person ids."""
    def read(self, request, person_id, friend_id):
        # Show the pair identified by the ids captured from the URL.
        pair = get_friendship(person_id, friend_id)
        context = {'friendship': pair}
        return render_to_response('people/friends_detail.html', context)
    def delete(self, request, person_id, friend_id):
        # Remove the m2m link from the first person to the second,
        # then send the client back to the collection view.
        pair = get_friendship(person_id, friend_id)
        pair[0].friends.remove(pair[1])
        return HttpResponseRedirect('/friends/')
# Map the collection plus the "<person_id>-<friend_id>" entry URL to
# the resources above; entries only allow GET and DELETE.
urlpatterns = patterns('',
    url(r'^friends/$', FriendshipCollection()),
    url(r'^friends/(?P<person_id>\d+)-(?P<friend_id>\d+)/$', FriendshipEntry(permitted_methods=('GET','DELETE'))),
)
[
1,
0,
0.0345,
0.0345,
0,
0.66,
0,
341,
0,
1,
0,
0,
341,
0,
0
],
[
1,
0,
0.069,
0.0345,
0,
0.66,
0.1429,
779,
0,
1,
0,
0,
779,
0,
0
],
[
1,
0,
0.1034,
0.0345,
0,
0... | [
"from django.conf.urls.defaults import *",
"from django.http import HttpResponseRedirect",
"from django.shortcuts import render_to_response",
"from django_restapi.resource import Resource",
"from django_restapi_tests.people.models import *",
"class FriendshipCollection(Resource):\n def read(self, reque... |
from django.conf.urls.defaults import *
from django_restapi.model_resource import Collection, Entry, reverse
from django_restapi.responder import *
from django_restapi_tests.polls.models import Poll, Choice
# JSON Test API URLs
#
# Polls are available at /json/polls/ and
# /json/polls/[poll_id]/.
#
# Different (manual) URL structure for choices:
# /json/polls/[poll_id]/choices/[number of choice]/
# Example: /json/polls/121/choices/2/ identifies the second
# choice for the poll with ID 121.
class ChoiceCollection(Collection):
    """
    Choices of one poll, mounted below /json/polls/<poll_id>/choices/.
    """
    def read(self, request):
        # NOTE(review): the poll id is taken positionally from the URL
        # path ("/json/polls/<id>/..."); this breaks if the mount point
        # of the URLconf changes — confirm before reuse.
        poll_id = int(request.path.split("/")[3])
        filtered_set = self.queryset._clone()
        filtered_set = filtered_set.filter(poll__id=poll_id)
        return self.responder.list(request, filtered_set)
    def get_entry(self, poll_id, choice_num):
        # choice_num is the 1-based position of the choice in its poll.
        poll = Poll.objects.get(id=int(poll_id))
        choice = poll.get_choice_from_num(int(choice_num))
        return ChoiceEntry(self, choice)
    def get_url(self):
        # NOTE(review): Collection instances have no self.model
        # attribute elsewhere in this file — this looks like a
        # duplicate of ChoiceEntry.get_url; confirm it is reachable.
        return reverse(self, (), {'poll_id':self.model.poll.id})
class ChoiceEntry(Entry):
    """Entry addressed by its poll's id plus its 1-based position."""
    def get_url(self):
        url_kwargs = {'poll_id': self.model.poll.id,
                      'choice_num': self.model.get_num()}
        return reverse(self.collection, (), url_kwargs)
json_poll_resource = Collection(
queryset = Poll.objects.all(),
permitted_methods = ('GET', 'POST', 'PUT', 'DELETE'),
expose_fields = ('id', 'question', 'pub_date'),
responder = JSONResponder(paginate_by=10)
)
json_choice_resource = ChoiceCollection(
queryset = Choice.objects.all(),
permitted_methods = ('GET', 'POST', 'PUT', 'DELETE'),
expose_fields = ('id', 'poll_id', 'choice', 'votes'),
responder = JSONResponder(paginate_by=5),
entry_class = ChoiceEntry
)
urlpatterns = patterns('',
url(r'^json/polls/(?P<poll_id>\d+)/choices/(?P<choice_num>\d+)/$', json_choice_resource, {'is_entry':True}),
url(r'^json/polls/(?P<poll_id>\d+)/choices/$', json_choice_resource, {'is_entry':False}),
url(r'^json/polls/(.*?)/?$', json_poll_resource)
)
| [
[
1,
0,
0.0175,
0.0175,
0,
0.66,
0,
341,
0,
1,
0,
0,
341,
0,
0
],
[
1,
0,
0.0351,
0.0175,
0,
0.66,
0.125,
272,
0,
3,
0,
0,
272,
0,
0
],
[
1,
0,
0.0526,
0.0175,
0,
0... | [
"from django.conf.urls.defaults import *",
"from django_restapi.model_resource import Collection, Entry, reverse",
"from django_restapi.responder import *",
"from django_restapi_tests.polls.models import Poll, Choice",
"class ChoiceCollection(Collection):\n \n def read(self, request):\n poll_id... |
from django.conf.urls.defaults import *
from django_restapi.model_resource import Collection
from django_restapi.responder import *
from django_restapi_tests.polls.models import Poll, Choice
template_poll_resource = Collection(
queryset = Poll.objects.all(),
permitted_methods = ('GET', 'POST', 'PUT', 'DELETE'),
expose_fields = ('id', 'question', 'pub_date'),
responder = TemplateResponder(
template_dir = 'polls',
template_object_name = 'poll',
paginate_by = 10
)
)
template_choice_resource = Collection(
queryset = Choice.objects.all(),
permitted_methods = ('GET',),
expose_fields = ('id', 'poll_id', 'choice', 'votes'),
responder = TemplateResponder(
template_dir = 'polls',
template_object_name = 'choice',
paginate_by = 5
)
)
urlpatterns = patterns('',
url(r'^html/polls/creator/$', template_poll_resource.responder.create_form),
url(r'^html/polls/(?P<pk>\d+)/editor/$', template_poll_resource.responder.update_form),
url(r'^html/polls/(.*?)/?$', template_poll_resource),
url(r'^html/choices/(.*?)/?$', template_choice_resource),
)
| [
[
1,
0,
0.0303,
0.0303,
0,
0.66,
0,
341,
0,
1,
0,
0,
341,
0,
0
],
[
1,
0,
0.0606,
0.0303,
0,
0.66,
0.1667,
272,
0,
1,
0,
0,
272,
0,
0
],
[
1,
0,
0.0909,
0.0303,
0,
... | [
"from django.conf.urls.defaults import *",
"from django_restapi.model_resource import Collection",
"from django_restapi.responder import *",
"from django_restapi_tests.polls.models import Poll, Choice",
"template_poll_resource = Collection(\n queryset = Poll.objects.all(),\n permitted_methods = ('GET'... |
from django.conf.urls.defaults import *
from django_restapi.model_resource import Collection
from django_restapi.responder import *
from django_restapi_tests.polls.models import Poll, Choice
simple_poll_resource = Collection(
queryset = Poll.objects.all(),
responder = XMLResponder(),
)
simple_choice_resource = Collection(
queryset = Choice.objects.all(),
responder = XMLResponder()
)
urlpatterns = patterns('',
url(r'^api/poll/(.*?)/?$', simple_poll_resource),
url(r'^api/choice/(.*?)/?$', simple_choice_resource)
)
| [
[
1,
0,
0.0556,
0.0556,
0,
0.66,
0,
341,
0,
1,
0,
0,
341,
0,
0
],
[
1,
0,
0.1111,
0.0556,
0,
0.66,
0.1667,
272,
0,
1,
0,
0,
272,
0,
0
],
[
1,
0,
0.1667,
0.0556,
0,
... | [
"from django.conf.urls.defaults import *",
"from django_restapi.model_resource import Collection",
"from django_restapi.responder import *",
"from django_restapi_tests.polls.models import Poll, Choice",
"simple_poll_resource = Collection(\n queryset = Poll.objects.all(), \n responder = XMLResponder(),... |
from django.conf.urls.defaults import *
from django_restapi.model_resource import Collection
from django_restapi.responder import *
from django_restapi_tests.polls.models import Poll, Choice
fixedend_poll_resource = Collection(
queryset = Poll.objects.all(),
responder = XMLResponder(),
)
fixedend_choice_resource = Collection(
queryset = Choice.objects.all(),
responder = XMLResponder()
)
urlpatterns = patterns('',
url(r'^polls/xml/$', fixedend_poll_resource),
url(r'^polls/(.*)/xml/$', fixedend_poll_resource),
url(r'^choices/xml/$', fixedend_choice_resource),
url(r'^choices/(.*)/xml/$', fixedend_choice_resource)
)
| [
[
1,
0,
0.05,
0.05,
0,
0.66,
0,
341,
0,
1,
0,
0,
341,
0,
0
],
[
1,
0,
0.1,
0.05,
0,
0.66,
0.1667,
272,
0,
1,
0,
0,
272,
0,
0
],
[
1,
0,
0.15,
0.05,
0,
0.66,
0.3... | [
"from django.conf.urls.defaults import *",
"from django_restapi.model_resource import Collection",
"from django_restapi.responder import *",
"from django_restapi_tests.polls.models import Poll, Choice",
"fixedend_poll_resource = Collection(\n queryset = Poll.objects.all(), \n responder = XMLResponder(... |
from django.conf.urls.defaults import *
from django_restapi.model_resource import Collection
from django_restapi.responder import *
from django_restapi.authentication import *
from django_restapi_tests.polls.models import Poll
# HTTP Basic
#
# No auth function specified
# -> django.contrib.auth.models.User is used.
# Test with username 'rest', password 'rest'.
basicauth_poll_resource = Collection(
queryset = Poll.objects.all(),
responder = XMLResponder(),
authentication = HttpBasicAuthentication()
)
# HTTP Digest
def digest_authfunc(username, realm):
"""
Exemplary authfunc for HTTP Digest. In production situations,
the combined hashes of realm, username and password are usually
stored in an external file/db.
"""
hashes = {
('realm1', 'john') : '3014aff1d0d0f0038e23c1195301def3', # Password: johnspass
('realm2', 'jim') : '5bae77fe607e161b831c8f8026a2ceb2' # Password: jimspass
}
return hashes[(username, realm)]
digestauth_poll_resource = Collection(
queryset = Poll.objects.all(),
responder = XMLResponder(),
authentication = HttpDigestAuthentication(digest_authfunc, 'realm1')
)
urlpatterns = patterns('',
url(r'^basic/polls/(.*?)/?$', basicauth_poll_resource),
url(r'^digest/polls/(.*?)/?$', digestauth_poll_resource)
) | [
[
1,
0,
0.0238,
0.0238,
0,
0.66,
0,
341,
0,
1,
0,
0,
341,
0,
0
],
[
1,
0,
0.0476,
0.0238,
0,
0.66,
0.125,
272,
0,
1,
0,
0,
272,
0,
0
],
[
1,
0,
0.0714,
0.0238,
0,
0... | [
"from django.conf.urls.defaults import *",
"from django_restapi.model_resource import Collection",
"from django_restapi.responder import *",
"from django_restapi.authentication import *",
"from django_restapi_tests.polls.models import Poll",
"basicauth_poll_resource = Collection(\n queryset = Poll.obje... |
from django.db import models
from django.http import Http404
class Person(models.Model):
name = models.CharField(max_length=20)
friends = models.ManyToManyField('self')
idols = models.ManyToManyField('self', symmetrical=False, related_name='stalkers')
def __unicode__(self):
return self.name
def get_friendship_list():
people = Person.objects.filter(friends__isnull=False)
friendships = []
for person in people:
for friend in person.friends.all():
friendship = [person, friend]
friendship.sort(cmp=lambda x, y: cmp(x.name, y.name))
if friendship not in friendships:
friendships.append(friendship)
friendships.sort(cmp=lambda x, y: cmp(x[0].name, y[0].name))
return friendships
def get_friendship(person_id, friend_id):
person = Person.objects.get(id=person_id)
try:
friend = person.friends.get(id=friend_id)
except Person.DoesNotExist:
raise Http404
friendship = [person, friend]
friendship.sort(cmp=lambda x,y: cmp(x.name, y.name))
return friendship | [
[
1,
0,
0.0312,
0.0312,
0,
0.66,
0,
40,
0,
1,
0,
0,
40,
0,
0
],
[
1,
0,
0.0625,
0.0312,
0,
0.66,
0.25,
779,
0,
1,
0,
0,
779,
0,
0
],
[
3,
0,
0.2188,
0.2188,
0,
0.66... | [
"from django.db import models",
"from django.http import Http404",
"class Person(models.Model):\n name = models.CharField(max_length=20)\n friends = models.ManyToManyField('self')\n idols = models.ManyToManyField('self', symmetrical=False, related_name='stalkers')\n\n def __unicode__(self):\n ... |
from django.test import TestCase
from django.utils.functional import curry
class GenericTest(TestCase):
fixtures = ['initial_data.json']
def setUp(self):
self.client.put = curry(self.client.post, REQUEST_METHOD='PUT')
self.client.delete = curry(self.client.get, REQUEST_METHOD='DELETE')
def test_resource(self):
url = '/friends/'
response = self.client.post(url)
self.failUnlessEqual(response.status_code, 405)
response = self.client.put(url)
self.failUnlessEqual(response.status_code, 405)
response = self.client.delete(url)
self.failUnlessEqual(response.status_code, 405)
response = self.client.get(url)
self.failUnlessEqual(response.status_code, 200)
url = '/friends/1-2/'
response = self.client.post(url)
self.failUnlessEqual(response.status_code, 405)
response = self.client.put(url)
self.failUnlessEqual(response.status_code, 405)
response = self.client.get(url)
self.failUnlessEqual(response.status_code, 200)
response = self.client.delete(url)
self.failUnlessEqual(response.status_code, 302)
response = self.client.get(url)
self.failUnlessEqual(response.status_code, 404) | [
[
1,
0,
0.0294,
0.0294,
0,
0.66,
0,
944,
0,
1,
0,
0,
944,
0,
0
],
[
1,
0,
0.0588,
0.0294,
0,
0.66,
0.5,
375,
0,
1,
0,
0,
375,
0,
0
],
[
3,
0,
0.5588,
0.9118,
0,
0.6... | [
"from django.test import TestCase",
"from django.utils.functional import curry",
"class GenericTest(TestCase):\n\n fixtures = ['initial_data.json']\n \n def setUp(self):\n self.client.put = curry(self.client.post, REQUEST_METHOD='PUT')\n self.client.delete = curry(self.client.get, REQ... |
from django.db import models
# Create your models here.
| [
[
1,
0,
0.3333,
0.3333,
0,
0.66,
0,
40,
0,
1,
0,
0,
40,
0,
0
]
] | [
"from django.db import models"
] |
# Create your views here.
| [] | [] |
#!/usr/bin/env python
from django.core.management import execute_manager
try:
import settings # Assumed to be in the same directory.
except ImportError:
import sys
sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n(If the file settings.py does indeed exist, it's causing an ImportError somehow.)\n" % __file__)
sys.exit(1)
if __name__ == "__main__":
execute_manager(settings)
| [
[
1,
0,
0.1818,
0.0909,
0,
0.66,
0,
879,
0,
1,
0,
0,
879,
0,
0
],
[
7,
0,
0.5,
0.5455,
0,
0.66,
0.5,
0,
0,
1,
0,
0,
0,
0,
2
],
[
1,
1,
0.3636,
0.0909,
1,
0.98,
... | [
"from django.core.management import execute_manager",
"try:\n import settings # Assumed to be in the same directory.\nexcept ImportError:\n import sys\n sys.stderr.write(\"Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\\nYou'll have to run dj... |
#!/usr/bin/python2.4
#
# Copyright 2007 The Python-Twitter Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''The setup and build script for the python-twitter library.'''
__author__ = 'python-twitter@googlegroups.com'
__version__ = '0.8.3'
# The base package metadata to be used by both distutils and setuptools
METADATA = dict(
name = "python-twitter",
version = __version__,
py_modules = ['twitter'],
author='The Python-Twitter Developers',
author_email='python-twitter@googlegroups.com',
description='A python wrapper around the Twitter API',
license='Apache License 2.0',
url='http://code.google.com/p/python-twitter/',
keywords='twitter api',
)
# Extra package metadata to be used only if setuptools is installed
SETUPTOOLS_METADATA = dict(
install_requires = ['setuptools', 'simplejson', 'oauth2'],
include_package_data = True,
classifiers = [
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Communications :: Chat',
'Topic :: Internet',
],
test_suite = 'twitter_test.suite',
)
def Read(file):
return open(file).read()
def BuildLongDescription():
return '\n'.join([Read('README'), Read('CHANGES')])
def Main():
# Build the long_description from the README and CHANGES
METADATA['long_description'] = BuildLongDescription()
# Use setuptools if available, otherwise fallback and use distutils
try:
import setuptools
METADATA.update(SETUPTOOLS_METADATA)
setuptools.setup(**METADATA)
except ImportError:
import distutils.core
distutils.core.setup(**METADATA)
if __name__ == '__main__':
Main()
| [
[
8,
0,
0.2329,
0.0137,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
14,
0,
0.2603,
0.0137,
0,
0.66,
0.125,
777,
1,
0,
0,
0,
0,
3,
0
],
[
14,
0,
0.274,
0.0137,
0,
0.66,
... | [
"'''The setup and build script for the python-twitter library.'''",
"__author__ = 'python-twitter@googlegroups.com'",
"__version__ = '0.8.3'",
"METADATA = dict(\n name = \"python-twitter\",\n version = __version__,\n py_modules = ['twitter'],\n author='The Python-Twitter Developers',\n author_email='pyth... |
#!/usr/bin/python2.4
#
# Copyright 2007 The Python-Twitter Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
# parse_qsl moved to urlparse module in v2.6
try:
from urlparse import parse_qsl
except:
from cgi import parse_qsl
import oauth2 as oauth
REQUEST_TOKEN_URL = 'https://api.twitter.com/oauth/request_token'
ACCESS_TOKEN_URL = 'https://api.twitter.com/oauth/access_token'
AUTHORIZATION_URL = 'https://api.twitter.com/oauth/authorize'
SIGNIN_URL = 'https://api.twitter.com/oauth/authenticate'
consumer_key = None
consumer_secret = None
if consumer_key is None or consumer_secret is None:
print 'You need to edit this script and provide values for the'
print 'consumer_key and also consumer_secret.'
print ''
print 'The values you need come from Twitter - you need to register'
print 'as a developer your "application". This is needed only until'
print 'Twitter finishes the idea they have of a way to allow open-source'
print 'based libraries to have a token that can be used to generate a'
print 'one-time use key that will allow the library to make the request'
print 'on your behalf.'
print ''
sys.exit(1)
signature_method_hmac_sha1 = oauth.SignatureMethod_HMAC_SHA1()
oauth_consumer = oauth.Consumer(key=consumer_key, secret=consumer_secret)
oauth_client = oauth.Client(oauth_consumer)
print 'Requesting temp token from Twitter'
resp, content = oauth_client.request(REQUEST_TOKEN_URL, 'GET')
if resp['status'] != '200':
print 'Invalid respond from Twitter requesting temp token: %s' % resp['status']
else:
request_token = dict(parse_qsl(content))
print ''
print 'Please visit this Twitter page and retrieve the pincode to be used'
print 'in the next step to obtaining an Authentication Token:'
print ''
print '%s?oauth_token=%s' % (AUTHORIZATION_URL, request_token['oauth_token'])
print ''
pincode = raw_input('Pincode? ')
token = oauth.Token(request_token['oauth_token'], request_token['oauth_token_secret'])
token.set_verifier(pincode)
print ''
print 'Generating and signing request for an access token'
print ''
oauth_client = oauth.Client(oauth_consumer, token)
resp, content = oauth_client.request(ACCESS_TOKEN_URL, method='POST', body='oauth_verifier=%s' % pincode)
access_token = dict(parse_qsl(content))
if resp['status'] != '200':
print 'The request for a Token did not succeed: %s' % resp['status']
print access_token
else:
print 'Your Twitter Access Token key: %s' % access_token['oauth_token']
print ' Access Token secret: %s' % access_token['oauth_token_secret']
print ''
| [
[
1,
0,
0.2,
0.0111,
0,
0.66,
0,
688,
0,
1,
0,
0,
688,
0,
0
],
[
1,
0,
0.2111,
0.0111,
0,
0.66,
0.0625,
509,
0,
1,
0,
0,
509,
0,
0
],
[
7,
0,
0.2611,
0.0444,
0,
0.6... | [
"import os",
"import sys",
"try:\n from urlparse import parse_qsl\nexcept:\n from cgi import parse_qsl",
" from urlparse import parse_qsl",
" from cgi import parse_qsl",
"import oauth2 as oauth",
"REQUEST_TOKEN_URL = 'https://api.twitter.com/oauth/request_token'",
"ACCESS_TOKEN_URL = 'https://api... |
"""Implementation of JSONEncoder
"""
import re
try:
from simplejson._speedups import encode_basestring_ascii as c_encode_basestring_ascii
except ImportError:
c_encode_basestring_ascii = None
try:
from simplejson._speedups import make_encoder as c_make_encoder
except ImportError:
c_make_encoder = None
ESCAPE = re.compile(r'[\x00-\x1f\\"\b\f\n\r\t]')
ESCAPE_ASCII = re.compile(r'([\\"]|[^\ -~])')
HAS_UTF8 = re.compile(r'[\x80-\xff]')
ESCAPE_DCT = {
'\\': '\\\\',
'"': '\\"',
'\b': '\\b',
'\f': '\\f',
'\n': '\\n',
'\r': '\\r',
'\t': '\\t',
}
for i in range(0x20):
ESCAPE_DCT.setdefault(chr(i), '\\u%04x' % (i,))
# Assume this produces an infinity on all machines (probably not guaranteed)
INFINITY = float('1e66666')
FLOAT_REPR = repr
def encode_basestring(s):
"""Return a JSON representation of a Python string
"""
def replace(match):
return ESCAPE_DCT[match.group(0)]
return '"' + ESCAPE.sub(replace, s) + '"'
def py_encode_basestring_ascii(s):
"""Return an ASCII-only JSON representation of a Python string
"""
if isinstance(s, str) and HAS_UTF8.search(s) is not None:
s = s.decode('utf-8')
def replace(match):
s = match.group(0)
try:
return ESCAPE_DCT[s]
except KeyError:
n = ord(s)
if n < 0x10000:
return '\\u%04x' % (n,)
else:
# surrogate pair
n -= 0x10000
s1 = 0xd800 | ((n >> 10) & 0x3ff)
s2 = 0xdc00 | (n & 0x3ff)
return '\\u%04x\\u%04x' % (s1, s2)
return '"' + str(ESCAPE_ASCII.sub(replace, s)) + '"'
encode_basestring_ascii = c_encode_basestring_ascii or py_encode_basestring_ascii
class JSONEncoder(object):
"""Extensible JSON <http://json.org> encoder for Python data structures.
Supports the following objects and types by default:
+-------------------+---------------+
| Python | JSON |
+===================+===============+
| dict | object |
+-------------------+---------------+
| list, tuple | array |
+-------------------+---------------+
| str, unicode | string |
+-------------------+---------------+
| int, long, float | number |
+-------------------+---------------+
| True | true |
+-------------------+---------------+
| False | false |
+-------------------+---------------+
| None | null |
+-------------------+---------------+
To extend this to recognize other objects, subclass and implement a
``.default()`` method with another method that returns a serializable
object for ``o`` if possible, otherwise it should call the superclass
implementation (to raise ``TypeError``).
"""
item_separator = ', '
key_separator = ': '
def __init__(self, skipkeys=False, ensure_ascii=True,
check_circular=True, allow_nan=True, sort_keys=False,
indent=None, separators=None, encoding='utf-8', default=None):
"""Constructor for JSONEncoder, with sensible defaults.
If skipkeys is False, then it is a TypeError to attempt
encoding of keys that are not str, int, long, float or None. If
skipkeys is True, such items are simply skipped.
If ensure_ascii is True, the output is guaranteed to be str
objects with all incoming unicode characters escaped. If
ensure_ascii is false, the output will be unicode object.
If check_circular is True, then lists, dicts, and custom encoded
objects will be checked for circular references during encoding to
prevent an infinite recursion (which would cause an OverflowError).
Otherwise, no such check takes place.
If allow_nan is True, then NaN, Infinity, and -Infinity will be
encoded as such. This behavior is not JSON specification compliant,
but is consistent with most JavaScript based encoders and decoders.
Otherwise, it will be a ValueError to encode such floats.
If sort_keys is True, then the output of dictionaries will be
sorted by key; this is useful for regression tests to ensure
that JSON serializations can be compared on a day-to-day basis.
If indent is a non-negative integer, then JSON array
elements and object members will be pretty-printed with that
indent level. An indent level of 0 will only insert newlines.
None is the most compact representation.
If specified, separators should be a (item_separator, key_separator)
tuple. The default is (', ', ': '). To get the most compact JSON
representation you should specify (',', ':') to eliminate whitespace.
If specified, default is a function that gets called for objects
that can't otherwise be serialized. It should return a JSON encodable
version of the object or raise a ``TypeError``.
If encoding is not None, then all input strings will be
transformed into unicode using that encoding prior to JSON-encoding.
The default is UTF-8.
"""
self.skipkeys = skipkeys
self.ensure_ascii = ensure_ascii
self.check_circular = check_circular
self.allow_nan = allow_nan
self.sort_keys = sort_keys
self.indent = indent
if separators is not None:
self.item_separator, self.key_separator = separators
if default is not None:
self.default = default
self.encoding = encoding
def default(self, o):
"""Implement this method in a subclass such that it returns
a serializable object for ``o``, or calls the base implementation
(to raise a ``TypeError``).
For example, to support arbitrary iterators, you could
implement default like this::
def default(self, o):
try:
iterable = iter(o)
except TypeError:
pass
else:
return list(iterable)
return JSONEncoder.default(self, o)
"""
raise TypeError("%r is not JSON serializable" % (o,))
def encode(self, o):
"""Return a JSON string representation of a Python data structure.
>>> JSONEncoder().encode({"foo": ["bar", "baz"]})
'{"foo": ["bar", "baz"]}'
"""
# This is for extremely simple cases and benchmarks.
if isinstance(o, basestring):
if isinstance(o, str):
_encoding = self.encoding
if (_encoding is not None
and not (_encoding == 'utf-8')):
o = o.decode(_encoding)
if self.ensure_ascii:
return encode_basestring_ascii(o)
else:
return encode_basestring(o)
# This doesn't pass the iterator directly to ''.join() because the
# exceptions aren't as detailed. The list call should be roughly
# equivalent to the PySequence_Fast that ''.join() would do.
chunks = self.iterencode(o, _one_shot=True)
if not isinstance(chunks, (list, tuple)):
chunks = list(chunks)
return ''.join(chunks)
def iterencode(self, o, _one_shot=False):
"""Encode the given object and yield each string
representation as available.
For example::
for chunk in JSONEncoder().iterencode(bigobject):
mysocket.write(chunk)
"""
if self.check_circular:
markers = {}
else:
markers = None
if self.ensure_ascii:
_encoder = encode_basestring_ascii
else:
_encoder = encode_basestring
if self.encoding != 'utf-8':
def _encoder(o, _orig_encoder=_encoder, _encoding=self.encoding):
if isinstance(o, str):
o = o.decode(_encoding)
return _orig_encoder(o)
def floatstr(o, allow_nan=self.allow_nan, _repr=FLOAT_REPR, _inf=INFINITY, _neginf=-INFINITY):
# Check for specials. Note that this type of test is processor- and/or
# platform-specific, so do tests which don't depend on the internals.
if o != o:
text = 'NaN'
elif o == _inf:
text = 'Infinity'
elif o == _neginf:
text = '-Infinity'
else:
return _repr(o)
if not allow_nan:
raise ValueError("Out of range float values are not JSON compliant: %r"
% (o,))
return text
if _one_shot and c_make_encoder is not None and not self.indent and not self.sort_keys:
_iterencode = c_make_encoder(
markers, self.default, _encoder, self.indent,
self.key_separator, self.item_separator, self.sort_keys,
self.skipkeys, self.allow_nan)
else:
_iterencode = _make_iterencode(
markers, self.default, _encoder, self.indent, floatstr,
self.key_separator, self.item_separator, self.sort_keys,
self.skipkeys, _one_shot)
return _iterencode(o, 0)
def _make_iterencode(markers, _default, _encoder, _indent, _floatstr, _key_separator, _item_separator, _sort_keys, _skipkeys, _one_shot,
## HACK: hand-optimized bytecode; turn globals into locals
False=False,
True=True,
ValueError=ValueError,
basestring=basestring,
dict=dict,
float=float,
id=id,
int=int,
isinstance=isinstance,
list=list,
long=long,
str=str,
tuple=tuple,
):
def _iterencode_list(lst, _current_indent_level):
if not lst:
yield '[]'
return
if markers is not None:
markerid = id(lst)
if markerid in markers:
raise ValueError("Circular reference detected")
markers[markerid] = lst
buf = '['
if _indent is not None:
_current_indent_level += 1
newline_indent = '\n' + (' ' * (_indent * _current_indent_level))
separator = _item_separator + newline_indent
buf += newline_indent
else:
newline_indent = None
separator = _item_separator
first = True
for value in lst:
if first:
first = False
else:
buf = separator
if isinstance(value, basestring):
yield buf + _encoder(value)
elif value is None:
yield buf + 'null'
elif value is True:
yield buf + 'true'
elif value is False:
yield buf + 'false'
elif isinstance(value, (int, long)):
yield buf + str(value)
elif isinstance(value, float):
yield buf + _floatstr(value)
else:
yield buf
if isinstance(value, (list, tuple)):
chunks = _iterencode_list(value, _current_indent_level)
elif isinstance(value, dict):
chunks = _iterencode_dict(value, _current_indent_level)
else:
chunks = _iterencode(value, _current_indent_level)
for chunk in chunks:
yield chunk
if newline_indent is not None:
_current_indent_level -= 1
yield '\n' + (' ' * (_indent * _current_indent_level))
yield ']'
if markers is not None:
del markers[markerid]
def _iterencode_dict(dct, _current_indent_level):
if not dct:
yield '{}'
return
if markers is not None:
markerid = id(dct)
if markerid in markers:
raise ValueError("Circular reference detected")
markers[markerid] = dct
yield '{'
if _indent is not None:
_current_indent_level += 1
newline_indent = '\n' + (' ' * (_indent * _current_indent_level))
item_separator = _item_separator + newline_indent
yield newline_indent
else:
newline_indent = None
item_separator = _item_separator
first = True
if _sort_keys:
items = dct.items()
items.sort(key=lambda kv: kv[0])
else:
items = dct.iteritems()
for key, value in items:
if isinstance(key, basestring):
pass
# JavaScript is weakly typed for these, so it makes sense to
# also allow them. Many encoders seem to do something like this.
elif isinstance(key, float):
key = _floatstr(key)
elif isinstance(key, (int, long)):
key = str(key)
elif key is True:
key = 'true'
elif key is False:
key = 'false'
elif key is None:
key = 'null'
elif _skipkeys:
continue
else:
raise TypeError("key %r is not a string" % (key,))
if first:
first = False
else:
yield item_separator
yield _encoder(key)
yield _key_separator
if isinstance(value, basestring):
yield _encoder(value)
elif value is None:
yield 'null'
elif value is True:
yield 'true'
elif value is False:
yield 'false'
elif isinstance(value, (int, long)):
yield str(value)
elif isinstance(value, float):
yield _floatstr(value)
else:
if isinstance(value, (list, tuple)):
chunks = _iterencode_list(value, _current_indent_level)
elif isinstance(value, dict):
chunks = _iterencode_dict(value, _current_indent_level)
else:
chunks = _iterencode(value, _current_indent_level)
for chunk in chunks:
yield chunk
if newline_indent is not None:
_current_indent_level -= 1
yield '\n' + (' ' * (_indent * _current_indent_level))
yield '}'
if markers is not None:
del markers[markerid]
def _iterencode(o, _current_indent_level):
if isinstance(o, basestring):
yield _encoder(o)
elif o is None:
yield 'null'
elif o is True:
yield 'true'
elif o is False:
yield 'false'
elif isinstance(o, (int, long)):
yield str(o)
elif isinstance(o, float):
yield _floatstr(o)
elif isinstance(o, (list, tuple)):
for chunk in _iterencode_list(o, _current_indent_level):
yield chunk
elif isinstance(o, dict):
for chunk in _iterencode_dict(o, _current_indent_level):
yield chunk
else:
if markers is not None:
markerid = id(o)
if markerid in markers:
raise ValueError("Circular reference detected")
markers[markerid] = o
o = _default(o)
for chunk in _iterencode(o, _current_indent_level):
yield chunk
if markers is not None:
del markers[markerid]
return _iterencode
| [
[
8,
0,
0.0035,
0.0046,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.0069,
0.0023,
0,
0.66,
0.0667,
540,
0,
1,
0,
0,
540,
0,
0
],
[
7,
0,
0.015,
0.0092,
0,
0.66,... | [
"\"\"\"Implementation of JSONEncoder\n\"\"\"",
"import re",
"try:\n from simplejson._speedups import encode_basestring_ascii as c_encode_basestring_ascii\nexcept ImportError:\n c_encode_basestring_ascii = None",
" from simplejson._speedups import encode_basestring_ascii as c_encode_basestring_ascii",... |
r"""JSON (JavaScript Object Notation) <http://json.org> is a subset of
JavaScript syntax (ECMA-262 3rd edition) used as a lightweight data
interchange format.
:mod:`simplejson` exposes an API familiar to users of the standard library
:mod:`marshal` and :mod:`pickle` modules. It is the externally maintained
version of the :mod:`json` library contained in Python 2.6, but maintains
compatibility with Python 2.4 and Python 2.5 and (currently) has
significant performance advantages, even without using the optional C
extension for speedups.
Encoding basic Python object hierarchies::
>>> import simplejson as json
>>> json.dumps(['foo', {'bar': ('baz', None, 1.0, 2)}])
'["foo", {"bar": ["baz", null, 1.0, 2]}]'
>>> print json.dumps("\"foo\bar")
"\"foo\bar"
>>> print json.dumps(u'\u1234')
"\u1234"
>>> print json.dumps('\\')
"\\"
>>> print json.dumps({"c": 0, "b": 0, "a": 0}, sort_keys=True)
{"a": 0, "b": 0, "c": 0}
>>> from StringIO import StringIO
>>> io = StringIO()
>>> json.dump(['streaming API'], io)
>>> io.getvalue()
'["streaming API"]'
Compact encoding::
>>> import simplejson as json
>>> json.dumps([1,2,3,{'4': 5, '6': 7}], separators=(',',':'))
'[1,2,3,{"4":5,"6":7}]'
Pretty printing::
>>> import simplejson as json
>>> s = json.dumps({'4': 5, '6': 7}, sort_keys=True, indent=4)
>>> print '\n'.join([l.rstrip() for l in s.splitlines()])
{
"4": 5,
"6": 7
}
Decoding JSON::
>>> import simplejson as json
>>> obj = [u'foo', {u'bar': [u'baz', None, 1.0, 2]}]
>>> json.loads('["foo", {"bar":["baz", null, 1.0, 2]}]') == obj
True
>>> json.loads('"\\"foo\\bar"') == u'"foo\x08ar'
True
>>> from StringIO import StringIO
>>> io = StringIO('["streaming API"]')
>>> json.load(io)[0] == 'streaming API'
True
Specializing JSON object decoding::
>>> import simplejson as json
>>> def as_complex(dct):
... if '__complex__' in dct:
... return complex(dct['real'], dct['imag'])
... return dct
...
>>> json.loads('{"__complex__": true, "real": 1, "imag": 2}',
... object_hook=as_complex)
(1+2j)
>>> import decimal
>>> json.loads('1.1', parse_float=decimal.Decimal) == decimal.Decimal('1.1')
True
Specializing JSON object encoding::
>>> import simplejson as json
>>> def encode_complex(obj):
... if isinstance(obj, complex):
... return [obj.real, obj.imag]
... raise TypeError("%r is not JSON serializable" % (o,))
...
>>> json.dumps(2 + 1j, default=encode_complex)
'[2.0, 1.0]'
>>> json.JSONEncoder(default=encode_complex).encode(2 + 1j)
'[2.0, 1.0]'
>>> ''.join(json.JSONEncoder(default=encode_complex).iterencode(2 + 1j))
'[2.0, 1.0]'
Using simplejson.tool from the shell to validate and pretty-print::
$ echo '{"json":"obj"}' | python -msimplejson.tool
{
"json": "obj"
}
$ echo '{ 1.2:3.4}' | python -msimplejson.tool
Expecting property name: line 1 column 2 (char 2)
"""
__version__ = '2.0.7'
__all__ = [
    'dump', 'dumps', 'load', 'loads',
    'JSONDecoder', 'JSONEncoder',
]
from decoder import JSONDecoder
from encoder import JSONEncoder
# Shared encoder instance reused by the all-defaults fast path of
# dump()/dumps(); constructed once with the documented default options.
_default_encoder = JSONEncoder(
    skipkeys=False,
    ensure_ascii=True,
    check_circular=True,
    allow_nan=True,
    indent=None,
    separators=None,
    encoding='utf-8',
    default=None,
)
def dump(obj, fp, skipkeys=False, ensure_ascii=True, check_circular=True,
        allow_nan=True, cls=None, indent=None, separators=None,
        encoding='utf-8', default=None, **kw):
    """Serialize ``obj`` as a JSON formatted stream to ``fp`` (a
    ``.write()``-supporting file-like object).

    Keyword arguments:

    - ``skipkeys``: when true, dict keys that are not of a basic type
      (``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``,
      ``None``) are skipped instead of raising ``TypeError``.
    - ``ensure_ascii``: when false, some chunks written to ``fp`` may be
      ``unicode`` instances, which ``fp.write()`` must be able to accept
      (as in ``codecs.getwriter()``).
    - ``check_circular``: when false, the circular-reference check for
      container types is skipped; a cycle then produces an
      ``OverflowError`` (or worse).
    - ``allow_nan``: when false, serializing out-of-range floats
      (``nan``, ``inf``, ``-inf``) raises ``ValueError`` instead of
      emitting the JavaScript equivalents.
    - ``indent``: non-negative integer for pretty-printing (0 inserts
      newlines only); ``None`` gives the most compact representation.
    - ``separators``: an ``(item_separator, dict_separator)`` tuple
      overriding the default ``(', ', ': ')``; ``(',', ':')`` is the
      most compact.
    - ``encoding``: character encoding for ``str`` instances (UTF-8
      default).
    - ``default``: callable returning a serializable version of objects
      that are otherwise unserializable; default raises ``TypeError``.
    - ``cls``: a ``JSONEncoder`` subclass (e.g. one overriding
      ``.default()``) to use instead of the default encoder.
    """
    all_defaults = (skipkeys is False and ensure_ascii is True and
        check_circular is True and allow_nan is True and
        cls is None and indent is None and separators is None and
        encoding == 'utf-8' and default is None and not kw)
    if all_defaults:
        # Common case: reuse the shared module-level encoder.
        encoder = _default_encoder
    else:
        encoder_cls = JSONEncoder if cls is None else cls
        encoder = encoder_cls(skipkeys=skipkeys, ensure_ascii=ensure_ascii,
            check_circular=check_circular, allow_nan=allow_nan, indent=indent,
            separators=separators, encoding=encoding,
            default=default, **kw)
    # Stream the chunks out one at a time; writelines could be faster on
    # some Python versions but is harder to debug.
    for piece in encoder.iterencode(obj):
        fp.write(piece)
def dumps(obj, skipkeys=False, ensure_ascii=True, check_circular=True,
        allow_nan=True, cls=None, indent=None, separators=None,
        encoding='utf-8', default=None, **kw):
    """Serialize ``obj`` to a JSON formatted ``str``.

    The keyword arguments have the same meaning as in ``dump()``; the
    only difference is that the encoded document is returned instead of
    being written to a file-like object.  When ``ensure_ascii`` is
    false the result may be a ``unicode`` instance (subject to normal
    Python ``str`` to ``unicode`` coercion rules) rather than an
    ASCII-escaped ``str``.
    """
    all_defaults = (skipkeys is False and ensure_ascii is True and
        check_circular is True and allow_nan is True and
        cls is None and indent is None and separators is None and
        encoding == 'utf-8' and default is None and not kw)
    if all_defaults:
        # Common case: reuse the shared module-level encoder.
        return _default_encoder.encode(obj)
    encoder_cls = JSONEncoder if cls is None else cls
    encoder = encoder_cls(
        skipkeys=skipkeys, ensure_ascii=ensure_ascii,
        check_circular=check_circular, allow_nan=allow_nan, indent=indent,
        separators=separators, encoding=encoding, default=default,
        **kw)
    return encoder.encode(obj)
# Shared decoder instance reused by the all-defaults fast path of
# load()/loads().
_default_decoder = JSONDecoder(encoding=None, object_hook=None)
def load(fp, encoding=None, cls=None, object_hook=None, parse_float=None,
        parse_int=None, parse_constant=None, **kw):
    """Deserialize the JSON document in ``fp`` (a ``.read()``-supporting
    file-like object) to a Python object.

    This is a thin wrapper that reads the whole stream and delegates to
    ``loads()``; see ``loads()`` for the meaning of each keyword
    argument.  Encodings that are not ASCII based (such as UCS-2) are
    not supported here -- wrap the stream with
    ``codecs.getreader(fp)(encoding)`` or decode to ``unicode`` and use
    ``loads()`` directly.
    """
    document = fp.read()
    return loads(document,
        encoding=encoding, cls=cls, object_hook=object_hook,
        parse_float=parse_float, parse_int=parse_int,
        parse_constant=parse_constant, **kw)
def loads(s, encoding=None, cls=None, object_hook=None, parse_float=None,
        parse_int=None, parse_constant=None, **kw):
    """Deserialize ``s`` (a ``str`` or ``unicode`` instance containing a
    JSON document) to a Python object.

    - ``encoding``: name of an ASCII-based encoding when ``s`` is a
      ``str`` not encoded as UTF-8; non-ASCII-based encodings must be
      decoded to ``unicode`` first.
    - ``object_hook``: called with the result of every object literal
      decode (a ``dict``); its return value is used instead of the dict
      (useful for custom decoders, e.g. JSON-RPC class hinting).
    - ``parse_float``: called with the literal string of every JSON
      float; defaults to ``float`` (use e.g. ``decimal.Decimal``).
    - ``parse_int``: called with the literal string of every JSON int;
      defaults to ``int`` (use e.g. ``float``).
    - ``parse_constant``: called with one of ``-Infinity``,
      ``Infinity``, ``NaN``, ``null``, ``true``, ``false``; may raise
      to reject invalid JSON numbers.
    - ``cls``: a ``JSONDecoder`` subclass to use instead of the default.
    """
    if (cls is None and encoding is None and object_hook is None and
            parse_int is None and parse_float is None and
            parse_constant is None and not kw):
        # All-defaults fast path: reuse the shared decoder.
        return _default_decoder.decode(s)
    if cls is None:
        cls = JSONDecoder
    # Forward only the hooks the caller actually supplied.
    for name, value in (('object_hook', object_hook),
                        ('parse_float', parse_float),
                        ('parse_int', parse_int),
                        ('parse_constant', parse_constant)):
        if value is not None:
            kw[name] = value
    return cls(encoding=encoding, **kw).decode(s)
| [
[
8,
0,
0.1582,
0.3133,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
14,
0,
0.3165,
0.0032,
0,
0.66,
0.1,
162,
1,
0,
0,
0,
0,
3,
0
],
[
14,
0,
0.3244,
0.0127,
0,
0.66,
... | [
"r\"\"\"JSON (JavaScript Object Notation) <http://json.org> is a subset of\nJavaScript syntax (ECMA-262 3rd edition) used as a lightweight data\ninterchange format.\n\n:mod:`simplejson` exposes an API familiar to users of the standard library\n:mod:`marshal` and :mod:`pickle` modules. It is the externally maintaine... |
"""JSON token scanner
"""
import re
try:
from simplejson._speedups import make_scanner as c_make_scanner
except ImportError:
c_make_scanner = None
__all__ = ['make_scanner']
NUMBER_RE = re.compile(
r'(-?(?:0|[1-9]\d*))(\.\d+)?([eE][-+]?\d+)?',
(re.VERBOSE | re.MULTILINE | re.DOTALL))
def py_make_scanner(context):
parse_object = context.parse_object
parse_array = context.parse_array
parse_string = context.parse_string
match_number = NUMBER_RE.match
encoding = context.encoding
strict = context.strict
parse_float = context.parse_float
parse_int = context.parse_int
parse_constant = context.parse_constant
object_hook = context.object_hook
def _scan_once(string, idx):
try:
nextchar = string[idx]
except IndexError:
raise StopIteration
if nextchar == '"':
return parse_string(string, idx + 1, encoding, strict)
elif nextchar == '{':
return parse_object((string, idx + 1), encoding, strict, _scan_once, object_hook)
elif nextchar == '[':
return parse_array((string, idx + 1), _scan_once)
elif nextchar == 'n' and string[idx:idx + 4] == 'null':
return None, idx + 4
elif nextchar == 't' and string[idx:idx + 4] == 'true':
return True, idx + 4
elif nextchar == 'f' and string[idx:idx + 5] == 'false':
return False, idx + 5
m = match_number(string, idx)
if m is not None:
integer, frac, exp = m.groups()
if frac or exp:
res = parse_float(integer + (frac or '') + (exp or ''))
else:
res = parse_int(integer)
return res, m.end()
elif nextchar == 'N' and string[idx:idx + 3] == 'NaN':
return parse_constant('NaN'), idx + 3
elif nextchar == 'I' and string[idx:idx + 8] == 'Infinity':
return parse_constant('Infinity'), idx + 8
elif nextchar == '-' and string[idx:idx + 9] == '-Infinity':
return parse_constant('-Infinity'), idx + 9
else:
raise StopIteration
return _scan_once
make_scanner = c_make_scanner or py_make_scanner
| [
[
8,
0,
0.0231,
0.0308,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.0462,
0.0154,
0,
0.66,
0.1667,
540,
0,
1,
0,
0,
540,
0,
0
],
[
7,
0,
0.0846,
0.0615,
0,
0.66... | [
"\"\"\"JSON token scanner\n\"\"\"",
"import re",
"try:\n from simplejson._speedups import make_scanner as c_make_scanner\nexcept ImportError:\n c_make_scanner = None",
" from simplejson._speedups import make_scanner as c_make_scanner",
" c_make_scanner = None",
"__all__ = ['make_scanner']",
... |
#!/usr/bin/python2.4
'''Post a message to twitter'''
__author__ = 'dewitt@google.com'
import ConfigParser
import getopt
import os
import sys
import twitter
USAGE = '''Usage: tweet [options] message
This script posts a message to Twitter.
Options:
-h --help : print this help
--consumer-key : the twitter consumer key
--consumer-secret : the twitter consumer secret
--access-key : the twitter access token key
--access-secret : the twitter access token secret
--encoding : the character set encoding used in input strings, e.g. "utf-8". [optional]
Documentation:
If either of the command line flags are not present, the environment
variables TWEETUSERNAME and TWEETPASSWORD will then be checked for your
consumer_key or consumer_secret, respectively.
If neither the command line flags nor the enviroment variables are
present, the .tweetrc file, if it exists, can be used to set the
default consumer_key and consumer_secret. The file should contain the
following three lines, replacing *consumer_key* with your consumer key, and
*consumer_secret* with your consumer secret:
A skeletal .tweetrc file:
[Tweet]
consumer_key: *consumer_key*
consumer_secret: *consumer_password*
access_key: *access_key*
access_secret: *access_password*
'''
def PrintUsageAndExit():
  """Write the usage text to stdout and exit with status 2."""
  sys.stdout.write('%s\n' % USAGE)
  sys.exit(2)
def GetConsumerKeyEnv():
  """Return the consumer key from $TWEETUSERNAME, or None if unset."""
  return os.environ.get('TWEETUSERNAME')
def GetConsumerSecretEnv():
  """Return the consumer secret from $TWEETPASSWORD, or None if unset."""
  return os.environ.get('TWEETPASSWORD')
def GetAccessKeyEnv():
  """Return the access token key from $TWEETACCESSKEY, or None if unset."""
  return os.environ.get('TWEETACCESSKEY')
def GetAccessSecretEnv():
  """Return the access token secret from $TWEETACCESSSECRET, or None if unset."""
  return os.environ.get('TWEETACCESSSECRET')
class TweetRc(object):
  """Reads default OAuth credentials from the ~/.tweetrc config file.

  The file is parsed lazily on first access and is expected to contain a
  [Tweet] section with consumer_key, consumer_secret, access_key and
  access_secret options.
  """
  def __init__(self):
    # Parsed ConfigParser instance, created lazily by _GetConfig().
    self._config = None
  def GetConsumerKey(self):
    return self._GetOption('consumer_key')
  def GetConsumerSecret(self):
    return self._GetOption('consumer_secret')
  def GetAccessKey(self):
    return self._GetOption('access_key')
  def GetAccessSecret(self):
    return self._GetOption('access_secret')
  def _GetOption(self, option):
    """Return *option* from the [Tweet] section, or None when unavailable.

    A missing file, missing section/option, or any parse failure yields
    None so callers fall through to other credential sources.
    """
    try:
      return self._GetConfig().get('Tweet', option)
    except Exception:
      # BUG FIX: was a bare ``except:`` which also swallowed SystemExit
      # and KeyboardInterrupt; those now propagate.
      return None
  def _GetConfig(self):
    """Parse ~/.tweetrc on first use and cache the result."""
    if not self._config:
      self._config = ConfigParser.ConfigParser()
      self._config.read(os.path.expanduser('~/.tweetrc'))
    return self._config
def main():
  """Parse flags, resolve credentials, and post the message to Twitter.

  Credential resolution order for each value: command-line flag, then
  environment variable, then the ~/.tweetrc file.  Exits with status 2
  on usage errors or when the message cannot be encoded.
  """
  try:
    shortflags = 'h'
    longflags = ['help', 'consumer-key=', 'consumer-secret=',
                 'access-key=', 'access-secret=', 'encoding=']
    opts, args = getopt.gnu_getopt(sys.argv[1:], shortflags, longflags)
  except getopt.GetoptError:
    PrintUsageAndExit()
  consumer_keyflag = None
  consumer_secretflag = None
  access_keyflag = None
  access_secretflag = None
  encoding = None
  for o, a in opts:
    if o in ('-h', '--help'):
      PrintUsageAndExit()
    # BUG FIX: these tests used ``o in ("--flag")`` -- a parenthesized
    # string, not a tuple -- which performed substring matching.  Use
    # equality in an elif chain instead.
    elif o == '--consumer-key':
      consumer_keyflag = a
    elif o == '--consumer-secret':
      consumer_secretflag = a
    elif o == '--access-key':
      access_keyflag = a
    elif o == '--access-secret':
      access_secretflag = a
    elif o == '--encoding':
      encoding = a
  message = ' '.join(args)
  if not message:
    PrintUsageAndExit()
  rc = TweetRc()
  consumer_key = consumer_keyflag or GetConsumerKeyEnv() or rc.GetConsumerKey()
  consumer_secret = consumer_secretflag or GetConsumerSecretEnv() or rc.GetConsumerSecret()
  access_key = access_keyflag or GetAccessKeyEnv() or rc.GetAccessKey()
  access_secret = access_secretflag or GetAccessSecretEnv() or rc.GetAccessSecret()
  if not consumer_key or not consumer_secret or not access_key or not access_secret:
    PrintUsageAndExit()
  api = twitter.Api(consumer_key=consumer_key, consumer_secret=consumer_secret,
                    access_token_key=access_key, access_token_secret=access_secret,
                    input_encoding=encoding)
  try:
    status = api.PostUpdate(message)
  except UnicodeDecodeError:
    print("Your message could not be encoded. Perhaps it contains non-ASCII characters? ")
    print("Try explicitly specifying the encoding with the --encoding flag")
    sys.exit(2)
  print("%s just posted: %s" % (status.user.name, status.text))
if __name__ == "__main__":
main()
| [
[
8,
0,
0.0213,
0.0071,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
14,
0,
0.0355,
0.0071,
0,
0.66,
0.0667,
777,
1,
0,
0,
0,
0,
3,
0
],
[
1,
0,
0.0496,
0.0071,
0,
0.66,... | [
"'''Post a message to twitter'''",
"__author__ = 'dewitt@google.com'",
"import ConfigParser",
"import getopt",
"import os",
"import sys",
"import twitter",
"USAGE = '''Usage: tweet [options] message\n\n This script posts a message to Twitter.\n\n Options:\n\n -h --help : print this help\n --co... |
#!/usr/bin/python2.4
#
# Copyright 2007 The Python-Twitter Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''A class that defines the default URL Shortener.
TinyURL is provided as the default and as an example.
'''
import urllib
# Change History
#
# 2010-05-16
# TinyURL example and the idea for this comes from a bug filed by
# acolorado with patch provided by ghills. Class implementation
# was done by bear.
#
# Issue 19 http://code.google.com/p/python-twitter/issues/detail?id=19
#
class ShortenURL(object):
  """Minimal URL-shortener client backed by the TinyURL web API."""
  def __init__(self,
               userid=None,
               password=None):
    """Create a shortener.

    Args:
      userid: userid for any required authorization call [optional]
      password: password for any required authorization call [optional]
    """
    self.userid = userid
    self.password = password
  def Shorten(self,
              longURL):
    """Return the TinyURL-shortened form of *longURL* as a string.

    Note: *longURL* is required and is submitted as-is; no completeness
    checks are performed on it.
    """
    handle = urllib.urlopen("http://tinyurl.com/api-create.php?url=%s" % longURL)
    try:
      shortened = handle.read()
    finally:
      handle.close()
    return shortened
| [
[
8,
0,
0.2606,
0.0563,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.3099,
0.0141,
0,
0.66,
0.5,
614,
0,
1,
0,
0,
614,
0,
0
],
[
3,
0,
0.7535,
0.507,
0,
0.66,
... | [
"'''A class that defines the default URL Shortener.\n\nTinyURL is provided as the default and as an example.\n'''",
"import urllib",
"class ShortenURL(object):\n '''Helper class to make URL Shortener calls if/when required'''\n def __init__(self,\n userid=None,\n password=N... |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Alexander Shorin
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
from astm.version import __version__
try:
from setuptools import setup, find_packages
except ImportError:
from distutils.core import setup
# http://wiki.python.org/moin/Distutils/Cookbook/AutoPackageDiscovery
import os
def is_package(path):
return (
os.path.isdir(path) and
os.path.isfile(os.path.join(path, '__init__.py'))
)
def find_packages(path='.', base=""):
""" Find all packages in path """
packages = {}
for item in os.listdir(path):
dir = os.path.join(path, item)
if is_package(dir):
if base:
module_name = "%(base)s.%(item)s" % vars()
else:
module_name = item
packages[module_name] = dir
packages.update(find_packages(dir, module_name))
return packages
# Register the package with distutils/setuptools.  The version string is
# maintained in astm/version.py; the long description is the full README.
setup(
    name = 'astm',
    version = __version__,
    description = 'Python implementation of ASTM E1381/1394 protocol.',
    long_description = open('README').read(),
    author = 'Alexander Shorin',
    author_email = 'kxepal@gmail.com',
    license = 'BSD',
    url = 'http://code.google.com/p/python-astm',
    install_requires = [],
    test_suite = 'astm.tests',
    zip_safe = True,
    classifiers = [
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.2',
        'Programming Language :: Python :: 3.3',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Topic :: Scientific/Engineering :: Medical Science Apps.'
    ],
    packages = find_packages(),
)
| [
[
1,
0,
0.1618,
0.0147,
0,
0.66,
0,
390,
0,
1,
0,
0,
390,
0,
0
],
[
7,
0,
0.3603,
0.3824,
0,
0.66,
0.5,
0,
0,
1,
0,
0,
0,
0,
9
],
[
1,
1,
0.1912,
0.0147,
1,
0.62,
... | [
"from astm.version import __version__",
"try:\n from setuptools import setup, find_packages\nexcept ImportError:\n from distutils.core import setup\n # http://wiki.python.org/moin/Distutils/Cookbook/AutoPackageDiscovery\n import os\n\n def is_package(path):",
" from setuptools import setup, fi... |
# -*- coding: utf-8 -*-
#
# Author: Sam Rushing <rushing@nightmare.com>
#
# Copyright (C) 2013 Alexander Shorin
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
"""
.. module:: astm.asynclib
:synopsis: Forked version of asyncore mixed with asynchat.
.. moduleauthor:: Sam Rushing <rushing@nightmare.com>
.. sectionauthor:: Christopher Petrilli <petrilli@amber.org>
.. sectionauthor:: Steve Holden <sholden@holdenweb.com>
.. heavily adapted from original documentation by Sam Rushing
"""
import heapq
import logging
import os
import select
import socket
import sys
import time
from collections import deque
from errno import (
EALREADY, EINPROGRESS, EWOULDBLOCK, ECONNRESET, EINVAL,
ENOTCONN, ESHUTDOWN, EINTR, EISCONN, EBADF, ECONNABORTED, EPIPE, EAGAIN,
errorcode
)
from .compat import long, b, bytes, buffer
class ExitNow(Exception):
    """Raised by event handlers to break out of the polling loop."""
# Socket errnos that indicate the peer has gone away; seeing any of these
# during send/recv means the channel should simply be closed.
_DISCONNECTED = frozenset((ECONNRESET, ENOTCONN, ESHUTDOWN, ECONNABORTED, EPIPE,
                           EBADF))
# Exceptions that must never be swallowed by the generic error handlers.
_RERAISEABLE_EXC = (ExitNow, KeyboardInterrupt, SystemExit)
# fd -> dispatcher map used by poll()/loop() when no explicit map is given.
_SOCKET_MAP = {}
# Heap of pending call_later instances consumed by scheduler().
_SCHEDULED_TASKS = []
log = logging.getLogger(__name__)
def _strerror(err):
try:
return os.strerror(err)
except (ValueError, OverflowError, NameError):
if err in errorcode:
return errorcode[err]
return "Unknown error %s" % err
def read(obj):
    """Triggers ``handle_read_event`` for specified object."""
    try:
        obj.handle_read_event()
    except _RERAISEABLE_EXC:
        # Loop-control exceptions (ExitNow/KeyboardInterrupt/SystemExit)
        # must not be reported as channel errors.
        raise
    except Exception:
        obj.handle_error()
def write(obj):
    """Triggers ``handle_write_event`` for specified object."""
    try:
        obj.handle_write_event()
    except _RERAISEABLE_EXC:
        raise
    except Exception:
        obj.handle_error()
def exception(obj):
    """Triggers ``handle_exception_event`` for specified object."""
    try:
        obj.handle_exception_event()
    except _RERAISEABLE_EXC:
        raise
    except Exception:
        obj.handle_error()
def readwrite(obj, flags):
    # Dispatch poll()-style *flags* to the object's event handlers; a single
    # call may fan out to several handlers when multiple bits are set.
    try:
        if flags & select.POLLIN:
            obj.handle_read_event()
        if flags & select.POLLOUT:
            obj.handle_write_event()
        if flags & select.POLLPRI:
            obj.handle_exception_event()
        if flags & (select.POLLHUP | select.POLLERR | select.POLLNVAL):
            obj.handle_close()
    except socket.error as e:
        # Peer-gone errnos mean a normal close; anything else is an error.
        if e.args[0] not in _DISCONNECTED:
            obj.handle_error()
        else:
            obj.handle_close()
    except _RERAISEABLE_EXC:
        raise
    except Exception:
        obj.handle_error()
def poll(timeout=0.0, map=None):
    """Run one ``select()`` pass over *map* (default: the global socket map).

    Dispatchers reporting readable/writable interest are polled and their
    corresponding event handlers invoked.  When no channel is interested
    the call sleeps for *timeout* seconds so caller loops do not spin.
    ``EINTR`` from select is swallowed; other select errors propagate.
    """
    if map is None:
        # Was ``map = map or _SOCKET_MAP`` -- redundant since ``map`` is
        # known to be None on this branch.
        map = _SOCKET_MAP
    if map:
        r = []
        w = []
        e = []
        for fd, obj in map.items():
            is_r = obj.readable()
            is_w = obj.writable()
            if is_r:
                r.append(fd)
            # accepting sockets should not be writable
            if is_w and not obj.accepting:
                w.append(fd)
            if is_r or is_w:
                e.append(fd)
        if [] == r == w == e:
            time.sleep(timeout)
            return
        try:
            r, w, e = select.select(r, w, e, timeout)
        except select.error as err:
            if err.args[0] != EINTR:
                raise
            else:
                return
        for fd in r:
            obj = map.get(fd)
            if obj is None:
                continue
            read(obj)
        for fd in w:
            obj = map.get(fd)
            if obj is None:
                continue
            write(obj)
        for fd in e:
            obj = map.get(fd)
            if obj is None:
                continue
            exception(obj)
def scheduler(tasks=None):
    """Run every task in the *tasks* heap whose deadline has passed.

    Tasks flagged ``repush`` are pushed back once (flag cleared) instead
    of running; each executed task is cancelled afterwards unless it
    already was.  Defaults to the module-wide task heap.
    """
    if tasks is None:
        tasks = _SCHEDULED_TASKS
    now = time.time()
    while tasks:
        if tasks[0].timeout > now:
            break
        task = heapq.heappop(tasks)
        if task.repush:
            task.repush = False
            heapq.heappush(tasks, task)
            continue
        try:
            task.call()
        finally:
            if not task.cancelled:
                task.cancel()
def loop(timeout=30.0, map=None, tasks=None, count=None):
    """
    Enter a polling loop that terminates after *count* passes or once all
    open channels have been closed and all tasks drained.

    - *timeout*: seconds passed to each :func:`poll` call (default 30).
    - *map*: dict of fd -> dispatcher channels to watch; channels remove
      themselves as they close.  Defaults to the global socket map.
    - *tasks*: heap of scheduled calls; defaults to the global task heap.
    - *count*: maximum number of passes; ``None`` (the default) loops
      until *map* and *tasks* are both empty.
    """
    if map is None:
        map = _SOCKET_MAP
    if tasks is None:
        tasks = _SCHEDULED_TASKS
    # Unified loop: the original duplicated the body across the
    # count / no-count cases.
    while (map or tasks) and (count is None or count > 0):
        if map:
            poll(timeout, map)
        if tasks:
            # BUG FIX: previously called scheduler() with no argument,
            # which processed the global task heap even when a custom
            # *tasks* list was supplied.
            scheduler(tasks)
        if count is not None:
            count -= 1
class call_later:
    """Calls a function at a later time.
    It can be used to asynchronously schedule a call within the polling
    loop without blocking it. The instance returned is an object that
    can be used to cancel or reschedule the call.
    """
    def __init__(self, seconds, target, *args, **kwargs):
        """
        - seconds: the number of seconds to wait
        - target: the callable object to call later
        - args: the arguments to call it with
        - kwargs: the keyword arguments to call it with
        - _tasks: a reserved keyword to specify a different list to
          store the delayed call instances.
        """
        assert callable(target), "%s is not callable" % target
        assert seconds >= 0, \
            "%s is not greater than or equal to 0 seconds" % (seconds)
        self.__delay = seconds
        self.__target = target
        self.__args = args
        # self.__kwargs aliases the same dict, so the pop() below also
        # removes '_tasks' from it before the target is ever called.
        self.__kwargs = kwargs
        self.__tasks = kwargs.pop('_tasks', _SCHEDULED_TASKS)
        # seconds from the epoch at which to call the function
        self.timeout = time.time() + self.__delay
        self.repush = False
        self.cancelled = False
        heapq.heappush(self.__tasks, self)
    def __lt__(self, other):
        # NOTE(review): implemented as <= rather than <; heapq only needs
        # __lt__ and this biases ties -- confirm before "fixing".
        return self.timeout <= other.timeout
    def call(self):
        """Call this scheduled function."""
        assert not self.cancelled, "Already cancelled"
        self.__target(*self.__args, **self.__kwargs)
    def reset(self):
        """Reschedule this call resetting the current countdown."""
        assert not self.cancelled, "Already cancelled"
        self.timeout = time.time() + self.__delay
        self.repush = True
    def delay(self, seconds):
        """Reschedule this call for a later time."""
        assert not self.cancelled, "Already cancelled."
        assert seconds >= 0, \
            "%s is not greater than or equal to 0 seconds" % (seconds)
        self.__delay = seconds
        newtime = time.time() + self.__delay
        if newtime > self.timeout:
            # Moving later: heap order is restored lazily via the repush
            # flag when scheduler() pops this entry.
            self.timeout = newtime
            self.repush = True
        else:
            # XXX - slow, can be improved
            self.timeout = newtime
            heapq.heapify(self.__tasks)
    def cancel(self):
        """Unschedule this call."""
        assert not self.cancelled, "Already cancelled"
        self.cancelled = True
        del self.__target, self.__args, self.__kwargs
        if self in self.__tasks:
            pos = self.__tasks.index(self)
            if pos == 0:
                heapq.heappop(self.__tasks)
            elif pos == len(self.__tasks) - 1:
                self.__tasks.pop(pos)
            else:
                # Replace the entry with the heap's last element, then
                # restore the invariant from that position.
                self.__tasks[pos] = self.__tasks.pop()
                heapq._siftup(self.__tasks, pos)
class Dispatcher(object):
"""
The :class:`Dispatcher` class is a thin wrapper around a low-level socket
object. To make it more useful, it has a few methods for event-handling
which are called from the asynchronous loop. Otherwise, it can be treated
as a normal non-blocking socket object.
The firing of low-level events at certain times or in certain connection
states tells the asynchronous loop that certain higher-level events have
taken place. For example, if we have asked for a socket to connect to
another host, we know that the connection has been made when the socket
becomes writable for the first time (at this point you know that you may
write to it with the expectation of success). The implied higher-level
events are:
+----------------------+----------------------------------------+
| Event | Description |
+======================+========================================+
| ``handle_connect()`` | Implied by the first read or write |
| | event |
+----------------------+----------------------------------------+
| ``handle_close()`` | Implied by a read event with no data |
| | available |
+----------------------+----------------------------------------+
| ``handle_accept()`` | Implied by a read event on a listening |
| | socket |
+----------------------+----------------------------------------+
During asynchronous processing, each mapped channel's :meth:`readable` and
:meth:`writable` methods are used to determine whether the channel's socket
should be added to the list of channels :c:func:`select`\ ed or
:c:func:`poll`\ ed for read and write events.
"""
    # Default state flags; instances override these as their connection
    # state changes.
    connected = False
    accepting = False
    addr = None
    def __init__(self, sock=None, map=None):
        # Channels register themselves in a fd -> dispatcher map; the
        # default is the module-global map used by loop()/poll().
        if map is None:
            self._map = _SOCKET_MAP
        else:
            self._map = map
        self._fileno = None
        if sock:
            # Set to nonblocking just to make sure for cases where we
            # get a socket from a blocking source.
            sock.setblocking(0)
            self.set_socket(sock, map)
            self.connected = True
            # The constructor no longer requires that the socket
            # passed be connected.
            try:
                self.addr = sock.getpeername()
            except socket.error as err:
                if err.args[0] == ENOTCONN:
                    # To handle the case where we got an unconnected
                    # socket.
                    self.connected = False
                else:
                    # The socket is broken in some unknown way, alert
                    # the user and remove it from the map (to prevent
                    # polling of broken sockets).
                    self._del_channel(map)
                    raise
        else:
            self.socket = None
    def __repr__(self):
        # Human-readable summary: class name, state, and peer address.
        status = [self.__class__.__module__ + '.' + self.__class__.__name__]
        if self.accepting and self.addr:
            status.append('listening')
        elif self.connected:
            status.append('connected')
        if self.addr is not None:
            try:
                status.append('%s:%d' % self.addr)
            except TypeError:
                # addr is not a (host, port) pair (e.g. a UNIX socket path).
                status.append(repr(self.addr))
        return '<%s at %#x>' % (' '.join(status), id(self))
    __str__ = __repr__
    def _add_channel(self, map=None):
        # Register this dispatcher under its fd so poll() will see it.
        log.debug('Adding channel %s' % self)
        if map is None:
            map = self._map
        map[self._fileno] = self
    def _del_channel(self, map=None):
        # Remove this dispatcher from the polling map, if present, and
        # forget the fd.
        fd = self._fileno
        if map is None:
            map = self._map
        if fd in map:
            log.debug('Closing channel %d:%s' % (fd, self))
            del map[fd]
        self._fileno = None
    def create_socket(self, family, type):
        """
        This is identical to the creation of a normal socket, and will use
        the same options for creation. Refer to the :mod:`socket` documentation
        for information on creating sockets.
        """
        self.family_and_type = family, type
        sock = socket.socket(family, type)
        sock.setblocking(0)
        self.set_socket(sock)
    def set_socket(self, sock, map=None):
        # Adopt *sock* as this channel's socket and register its fd.
        self.socket = sock
        self._fileno = sock.fileno()
        self._add_channel(map)
    def set_reuse_addr(self):
        # Best-effort SO_REUSEADDR so restarted servers can rebind quickly;
        # failures are deliberately ignored.
        try:
            self.socket.setsockopt(
                socket.SOL_SOCKET, socket.SO_REUSEADDR,
                self.socket.getsockopt(socket.SOL_SOCKET,
                                       socket.SO_REUSEADDR) | 1
            )
        except socket.error:
            pass
    def readable(self):
        """
        Called each time around the asynchronous loop to determine whether a
        channel's socket should be added to the list on which read events can
        occur. The default method simply returns ``True``, indicating that by
        default, all channels will be interested in read events."""
        return True
    def writable(self):
        """
        Called each time around the asynchronous loop to determine whether a
        channel's socket should be added to the list on which write events can
        occur. The default method simply returns ``True``, indicating that by
        default, all channels will be interested in write events.
        """
        return True
    def listen(self, num):
        """Listen for connections made to the socket.
        The `num` argument specifies the maximum number of queued connections
        and should be at least 1; the maximum value is system-dependent
        (usually 5)."""
        self.accepting = True
        # Old Windows versions cap the listen backlog at 5.
        if os.name == 'nt' and num > 5:
            num = 5
        return self.socket.listen(num)
    def bind(self, address):
        """Bind the socket to `address`.
        The socket must not already be bound. The format of `address` depends
        on the address family --- refer to the :mod:`socket` documentation for
        more information. To mark the socket as re-usable (setting the
        :const:`SO_REUSEADDR` option), call the :class:`Dispatcher` object's
        :meth:`set_reuse_addr` method.
        """
        self.addr = address
        return self.socket.bind(address)
    def connect(self, address):
        """
        As with the normal socket object, `address` is a tuple with the first
        element the host to connect to, and the second the port number.
        """
        self.connected = False
        self.addr = address
        # connect_ex() returns an errno instead of raising.
        err = self.socket.connect_ex(address)
        if err in (EINPROGRESS, EALREADY, EWOULDBLOCK)\
                or err == EINVAL and os.name in ('nt', 'ce'):
            return
        if err in (0, EISCONN):
            self.handle_connect_event()
        else:
            raise socket.error(err, errorcode[err])
def accept(self):
"""Accept a connection.
The socket must be bound to an address and listening for connections.
The return value can be either ``None`` or a pair ``(conn, address)``
where `conn` is a *new* socket object usable to send and receive data on
the connection, and *address* is the address bound to the socket on the
other end of the connection.
When ``None`` is returned it means the connection didn't take place, in
which case the server should just ignore this event and keep listening
for further incoming connections.
"""
try:
conn, addr = self.socket.accept()
except TypeError:
return None
except socket.error as err:
if err.args[0] in (EWOULDBLOCK, ECONNABORTED, EAGAIN):
return None
else:
raise
else:
return conn, addr
def send(self, data):
"""Send `data` to the remote end-point of the socket."""
try:
log.debug('[%s:%d] <<< %r', self.addr[0], self.addr[1], data)
result = self.socket.send(data)
return result
except socket.error as err:
if err.args[0] == EWOULDBLOCK:
return 0
elif err.args[0] in _DISCONNECTED:
self.handle_close()
return 0
else:
raise
def recv(self, buffer_size):
"""Read at most `buffer_size` bytes from the socket's remote end-point.
An empty string implies that the channel has been closed from the other
end.
"""
try:
data = self.socket.recv(buffer_size)
log.debug('[%s:%d] >>> %r', self.addr[0], self.addr[1], data)
if not data:
# a closed connection is indicated by signaling
# a read condition, and having recv() return 0.
self.handle_close()
return b''
else:
return data
except socket.error as err:
# winsock sometimes throws ENOTCONN
if err.args[0] in _DISCONNECTED:
self.handle_close()
return b''
else:
raise
def close(self):
"""Close the socket.
All future operations on the socket object will fail.
The remote end-point will receive no more data (after queued data is
flushed). Sockets are automatically closed when they are
garbage-collected.
"""
self.connected = False
self.accepting = False
self._del_channel()
try:
self.socket.close()
except socket.error as err:
if err.args[0] not in (ENOTCONN, EBADF):
raise
def handle_read_event(self):
if self.accepting:
# accepting sockets are never connected, they "spawn" new
# sockets that are connected
self.handle_accept()
elif not self.connected:
self.handle_connect_event()
self.handle_read()
else:
self.handle_read()
def handle_connect_event(self):
err = self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
if err != 0:
raise socket.error(err, _strerror(err))
self.handle_connect()
self.connected = True
def handle_write_event(self):
if self.accepting:
# Accepting sockets shouldn't get a write event.
# We will pretend it didn't happen.
return
if not self.connected:
#check for errors
err = self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
if err != 0:
raise socket.error(err, _strerror(err))
self.handle_connect_event()
self.handle_write()
def handle_exception_event(self):
# handle_exception_event() is called if there might be an error on the
# socket, or if there is OOB data
# check for the error condition first
err = self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
if err != 0:
# we can get here when select.select() says that there is an
# exceptional condition on the socket
# since there is an error, we'll go ahead and close the socket
# like we would in a subclassed handle_read() that received no
# data
self.handle_close()
else:
self.handle_exception()
def handle_error(self):
"""
Called when an exception is raised and not otherwise handled.
The default version prints a condensed traceback.
"""
try:
self_repr = repr(self)
except Exception:
self_repr = '<__repr__(self) failed for object at %0x>' % id(self)
log.exception('Uncatched python exception, closing channel %s',
self_repr)
self.handle_close()
    def handle_exception(self):
        """Handle out-of-band data; invoked from handle_exception_event()
        when the socket carries no pending error.  Default just logs."""
        log.exception('Unknown error')
    def handle_read(self):
        """Called when the socket is readable; default only logs the event."""
        log.debug('Unhandled read event')
    def handle_write(self):
        """
        Called when the asynchronous loop detects that a writable socket can be
        written. Often this method will implement the necessary buffering for
        performance. For example::
            def handle_write(self):
                sent = self.send(self.buffer)
                self.buffer = self.buffer[sent:]
        """
        # Default implementation only records that the event went unhandled.
        log.debug('Unhandled write event')
    def handle_connect(self):
        """
        Called when the active opener's socket actually makes a connection.
        Might send a "welcome" banner, or initiate a protocol negotiation with
        the remote endpoint, for example.
        """
        # self.addr was recorded by connect()/bind() before this fires.
        log.info('[%s:%d] Connection established', self.addr[0], self.addr[1])
    def handle_accept(self):
        """
        Called on listening channels (passive openers) when a connection can be
        established with a new remote endpoint that has issued a :meth:`connect`
        call for the local endpoint.
        """
        # NOTE(review): logs the listening address, not the peer's — confirm.
        log.info('[%s:%d] Connection accepted', self.addr[0], self.addr[1])
    def handle_close(self):
        """Called when the socket is closed."""
        log.info('[%s:%d] Connection closed', self.addr[0], self.addr[1])
        # Tear down the socket and remove the channel from the map.
        self.close()
def close_all(map=None, tasks=None, ignore_all=False):
    """Close every channel in *map* and cancel every task in *tasks*.

    Defaults to the module's global socket map and scheduled-task list.
    With ``ignore_all`` set, non-fatal errors from individual channels or
    tasks are swallowed instead of propagated.
    """
    if map is None:
        map = _SOCKET_MAP
    if tasks is None:
        tasks = _SCHEDULED_TASKS
    for channel in list(map.values()):
        try:
            channel.close()
        except OSError as err:
            # A bad descriptor means the channel is already gone.
            if err.args[0] != EBADF and not ignore_all:
                raise
        except _RERAISEABLE_EXC:
            raise
        except Exception:
            if not ignore_all:
                raise
    map.clear()
    for task in tasks:
        try:
            task.cancel()
        except _RERAISEABLE_EXC:
            raise
        except Exception:
            if not ignore_all:
                raise
    del tasks[:]
class AsyncChat(Dispatcher):
    """
    This class is an abstract subclass of :class:`Dispatcher`. To make
    practical use of the code you must subclass :class:`AsyncChat`, providing
    meaningful meth:`found_terminator` method.
    The :class:`Dispatcher` methods can be used, although not all make
    sense in a message/response context.
    Like :class:`Dispatcher`, :class:`AsyncChat` defines a set of
    events that are generated by an analysis of socket conditions after a
    :c:func:`select` call. Once the polling loop has been started the
    :class:`AsyncChat` object's methods are called by the event-processing
    framework with no action on the part of the programmer.
    """
    # these are overridable defaults
    #: The asynchronous input buffer size.
    recv_buffer_size = 4096
    #: The asynchronous output buffer size.
    send_buffer_size = 4096
    #: Encoding usage is not enabled by default, because that is a
    #: sign of an application bug that we don't want to pass silently.
    use_encoding = False
    #: Default encoding.
    encoding = 'latin-1'
    #: Remove terminator from the result data.
    strip_terminator = True
    _terminator = None
    def __init__(self, sock=None, map=None):
        # for string terminator matching
        self._input_buffer = b''
        self.inbox = deque()
        self.outbox = deque()
        super(AsyncChat, self).__init__(sock, map)
        # Aliases kept for asynchat API compatibility.
        self.collect_incoming_data = self.pull
        self.initiate_send = self.flush
    def pull(self, data):
        """Puts `data` into incoming queue. Also available by alias
        `collect_incoming_data`.
        """
        self.inbox.append(data)
    def found_terminator(self):
        """
        Called when the incoming data stream matches the :attr:`termination`
        condition. The default method, which must be overridden, raises a
        :exc:`NotImplementedError` exception. The buffered input data should be
        available via an instance attribute.
        """
        raise NotImplementedError("must be implemented in subclass")
    def _set_terminator(self, term):
        self._terminator = term
    def _get_terminator(self):
        return self._terminator
    #: The input delimiter and the terminating condition to be recognized on the
    #: channel. May be any of three types of value, corresponding to three
    #: different ways to handle incoming protocol data.
    #:
    #: +-----------+---------------------------------------------+
    #: | term      | Description                                 |
    #: +===========+=============================================+
    #: | *string*  | Will call :meth:`found_terminator` when the |
    #: |           | string is found in the input stream         |
    #: +-----------+---------------------------------------------+
    #: | *integer* | Will call :meth:`found_terminator` when the |
    #: |           | indicated number of characters have been    |
    #: |           | received                                    |
    #: +-----------+---------------------------------------------+
    #: | ``None``  | The channel continues to collect data       |
    #: |           | forever                                     |
    #: +-----------+---------------------------------------------+
    #:
    #: Note that any data following the terminator will be available for reading
    #: by the channel after :meth:`found_terminator` is called.
    terminator = property(_get_terminator, _set_terminator)
    def handle_read(self):
        """Collect incoming bytes and scan them for the terminator."""
        try:
            data = self.recv(self.recv_buffer_size)
        except socket.error:
            self.handle_error()
            return
        # BUG FIX: the original called ``isinstance()`` with no arguments,
        # which raised TypeError as soon as ``use_encoding`` was enabled.
        # Decode only raw byte data that is not already text (mirrors the
        # bytes check in _send_chunky()).
        if self.use_encoding and not isinstance(data, str):
            data = data.decode(self.encoding)
        self._input_buffer += data
        while self._input_buffer:
            terminator = self.terminator
            # NOTE(review): ``long`` (and ``buffer``/``b`` below) presumably
            # come from a py2/py3 compat module imported at file top — confirm.
            if not terminator:
                handler = self._lookup_none_terminator
            elif isinstance(terminator, (int, long)):
                handler = self._lookup_int_terminator
            elif isinstance(terminator, str):
                handler = self._lookup_str_terminator
            else:
                handler = self._lookup_list_terminator
            res = handler(self.terminator)
            if res is None:
                # Partial terminator prefix at buffer end: wait for more data.
                break
    def _lookup_none_terminator(self, terminator):
        # No terminator configured: hand over everything collected so far.
        self.pull(self._input_buffer)
        self._input_buffer = ''
        return False
    def _lookup_int_terminator(self, terminator):
        # Numeric terminator: emit once `terminator` characters arrived.
        if len(self._input_buffer) < terminator:
            self.pull(self._input_buffer)
            self._input_buffer = ''
            return False
        else:
            self.pull(self._input_buffer[:terminator])
            self._input_buffer = self._input_buffer[terminator:]
            self.found_terminator()
            return True
    def _lookup_list_terminator(self, terminator):
        # Several alternative terminators: use the first one present.
        for item in terminator:
            if self._input_buffer.find(item) != -1:
                return self._lookup_str_terminator(item)
        return self._lookup_none_terminator(terminator)
    def _lookup_str_terminator(self, terminator):
        # 3 cases:
        # 1) end of buffer matches terminator exactly:
        #    collect data, transition
        # 2) end of buffer matches some prefix:
        #    collect data to the prefix
        # 3) end of buffer does not match any prefix:
        #    collect data
        terminator_len = len(terminator)
        index = self._input_buffer.find(terminator)
        if index != -1:
            # we found the terminator
            if self.strip_terminator and index > 0:
                self.pull(self._input_buffer[:index])
            elif not self.strip_terminator:
                self.pull(self._input_buffer[:index+terminator_len])
            self._input_buffer = self._input_buffer[index+terminator_len:]
            # This does the Right Thing if the terminator is changed here.
            self.found_terminator()
            return True
        else:
            # check for a prefix of the terminator
            index = find_prefix_at_end(self._input_buffer, terminator)
            if index:
                if index != len(self._input_buffer):
                    # we found a prefix, collect up to the prefix
                    self.pull(self._input_buffer[:-index])
                    self._input_buffer = self._input_buffer[-index:]
                return None
            else:
                # no prefix, collect it all
                self.pull(self._input_buffer)
                self._input_buffer = ''
                return False
    def handle_write(self):
        self.flush()
    def push(self, data):
        """
        Pushes data on to the channel's fifo to ensure its transmission.
        This is all you need to do to have the channel write the data out to
        the network.
        """
        sabs = self.send_buffer_size
        if len(data) > sabs:
            # Pre-slice oversized payloads into send-buffer-sized chunks.
            for i in range(0, len(data), sabs):
                self.outbox.append(data[i:i+sabs])
        else:
            self.outbox.append(data)
        return self.flush()
    def push_with_producer(self, producer):
        self.outbox.append(producer)
        return self.flush()
    def readable(self):
        """Predicate for inclusion in the readable for select()"""
        return True
    def writable(self):
        """Predicate for inclusion in the writable for select()"""
        # For nonblocking sockets connect() will not set self.connected flag,
        # due to EINPROGRESS socket error which is actually promise for
        # successful connection.
        return bool(self.outbox or not self.connected)
    def close_when_done(self):
        """Automatically close this channel once the outgoing queue is empty."""
        # ``None`` in the outbox is the close sentinel seen by _send_chunky().
        self.outbox.append(None)
    def flush(self):
        """Sends all data from outgoing queue."""
        while self.outbox and self.connected:
            self._send_chunky(self.outbox.popleft())
    def _send_chunky(self, data):
        """Sends data as chunks sized by ``send_buffer_size`` value.
        Returns ``True`` on success, ``False`` on error and ``None`` on closing
        event.
        """
        if self.use_encoding and not isinstance(data, bytes):
            data = data.encode(self.encoding)
        while True:
            if data is None:
                # Close sentinel queued by close_when_done().
                self.handle_close()
                return
            obs = self.send_buffer_size
            bdata = buffer(data, 0, obs)
            try:
                num_sent = self.send(bdata)
            except socket.error:
                self.handle_error()
                return False
            if num_sent and num_sent < len(bdata) or obs < len(data):
                # Partial send: drop what went out and retry the remainder.
                data = data[num_sent:]
            else:
                return True
    def discard_buffers(self):
        """In emergencies this method will discard any data held in the input
        and output buffers."""
        self.discard_input_buffers()
        self.discard_output_buffers()
    def discard_input_buffers(self):
        self._input_buffer = b('')
        self.inbox.clear()
    def discard_output_buffers(self):
        self.outbox.clear()
def find_prefix_at_end(haystack, needle):
    """Return the length of the longest proper prefix of *needle* found at
    the very end of *haystack*, or 0 when no prefix matches."""
    size = len(needle) - 1
    while size:
        if haystack.endswith(needle[:size]):
            break
        size -= 1
    return size
| [
[
8,
0,
0.0165,
0.0085,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.0223,
0.0011,
0,
0.66,
0.0345,
251,
0,
1,
0,
0,
251,
0,
0
],
[
1,
0,
0.0234,
0.0011,
0,
0.66... | [
"\"\"\"\n.. module:: astm.asynclib\n :synopsis: Forked version of asyncore mixed with asynchat.\n.. moduleauthor:: Sam Rushing <rushing@nightmare.com>\n.. sectionauthor:: Christopher Petrilli <petrilli@amber.org>\n.. sectionauthor:: Steve Holden <sholden@holdenweb.com>\n.. heavily adapted from original documentat... |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Alexander Shorin
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
from collections import Iterable
from .compat import unicode
from .constants import (
STX, ETX, ETB, CR, LF, CRLF,
FIELD_SEP, COMPONENT_SEP, RECORD_SEP, REPEAT_SEP, ENCODING
)
try:
from itertools import izip_longest
except ImportError: # Python 3
from itertools import zip_longest as izip_longest
def decode(data, encoding=ENCODING):
    """Common ASTM decoding function that tries to guess which kind of data it
    handles.

    When `data` starts with the STX control character (``0x02``) it is taken
    to be a complete ASTM message with checksum and framing; when it starts
    with a digit it is taken to be a frame (records preceded by a sequence
    number, no checksum); anything else is decoded as a single record.

    `data` must be bytes even when its `encoding` is known.

    :param data: ASTM data object.
    :type data: bytes
    :param encoding: Data encoding.
    :type encoding: str
    :return: List of ASTM records with unicode data.
    :rtype: list
    """
    if not isinstance(data, bytes):
        raise TypeError('bytes expected, got %r' % data)
    if data.startswith(STX):  # may be decode message \x02...\x03CS\r\n
        _, records, _ = decode_message(data, encoding)
        return records
    head = data[:1].decode()
    if head.isdigit():
        _, records = decode_frame(data, encoding)
        return records
    return [decode_record(data, encoding)]
def decode_message(message, encoding):
    """Decodes a complete ASTM message as sent or received on the wire,
    verifying its embedded checksum.

    :param message: ASTM message.
    :type message: bytes
    :param encoding: Data encoding.
    :type encoding: str
    :returns: Tuple of three elements:
        * :class:`int` frame sequence number.
        * :class:`list` of records with unicode data.
        * :class:`bytes` checksum.
    :raises:
        * :exc:`ValueError` if ASTM message is malformed.
        * :exc:`AssertionError` if checksum verification fails.
    """
    if not isinstance(message, bytes):
        raise TypeError('bytes expected, got %r' % message)
    well_framed = message.startswith(STX) and message.endswith(CRLF)
    if not well_framed:
        raise ValueError('Malformed ASTM message. Expected that it will started'
                         ' with %x and followed by %x%x characters. Got: %r'
                         ' ' % (ord(STX), ord(CR), ord(LF), message))
    # Strip STX and trailing CRLF, then peel off the 2-byte checksum.
    frame_cs = message[1:-2]
    frame, cs = frame_cs[:-2], frame_cs[-2:]
    ccs = make_checksum(frame)
    assert cs == ccs, 'Checksum failure: expected %r, calculated %r' % (cs, ccs)
    seq, records = decode_frame(frame, encoding)
    return seq, records, cs.decode()
def decode_frame(frame, encoding):
    """Decodes ASTM frame: list of records followed by sequence number."""
    if not isinstance(frame, bytes):
        raise TypeError('bytes expected, got %r' % frame)
    # A complete frame ends with <CR><ETX>; an intermediate one with <ETB>.
    if frame.endswith(CR + ETX):
        body = frame[:-2]
    elif frame.endswith(ETB):
        body = frame[:-1]
    else:
        raise ValueError('Incomplete frame data %r.'
                         ' Expected trailing <CR><ETX> or <ETB> chars' % frame)
    seq = body[:1].decode()
    if not seq.isdigit():
        raise ValueError('Malformed ASTM frame. Expected leading seq number %r'
                         '' % body)
    records = body[1:]
    return int(seq), [decode_record(item, encoding)
                      for item in records.split(RECORD_SEP)]
def decode_record(record, encoding):
    """Decodes ASTM record message into a list of field values.

    Empty fields become ``None``; fields containing component or repeat
    separators are decoded into nested lists.
    """
    fields = []
    for raw in record.split(FIELD_SEP):
        if REPEAT_SEP in raw:
            value = decode_repeated_component(raw, encoding)
        elif COMPONENT_SEP in raw:
            value = decode_component(raw, encoding)
        else:
            value = raw.decode(encoding)
        fields.append(value if value else None)
    return fields
def decode_component(field, encoding):
    """Decodes ASTM field component; empty items become ``None``."""
    parts = []
    for item in field.split(COMPONENT_SEP):
        parts.append(item.decode(encoding) if item else None)
    return parts
def decode_repeated_component(component, encoding):
    """Decodes ASTM field repeated component into a list of components."""
    chunks = component.split(REPEAT_SEP)
    return [decode_component(chunk, encoding) for chunk in chunks]
def encode(records, encoding=ENCODING, size=None, seq=1):
    """Encodes a list of records into a single "packed" ASTM message.

    Use :func:`iter_encode` instead to emit each record as a standalone
    message.  When the packed message exceeds `size` bytes (and `size` is
    not :const:`None`) it is split into chunks.

    :param records: List of ASTM records.
    :type records: list
    :param encoding: Data encoding.
    :type encoding: str
    :param size: Chunk size in bytes.
    :type size: int
    :param seq: Frame start sequence number.
    :type seq: int
    :return: List of ASTM message chunks.
    :rtype: list
    """
    message = encode_message(seq, records, encoding)
    if size is not None and len(message) > size:
        return list(split(message, size))
    return [message]
def iter_encode(records, encoding=ENCODING, size=None, seq=1):
    """Encodes and emits each record as separate message.
    If the result message is too large (greater than specified `size` if it's
    not :const:`None`), than it will be split by chunks.
    :yields: ASTM message chunks.
    :rtype: str
    """
    for record in records:
        # Encode with the current counter; when the message is chunked the
        # per-chunk frame numbers themselves are assigned inside split().
        msg = encode_message(seq, [record], encoding)
        if size is not None and len(msg) > size:
            for chunk in split(msg, size):
                # Advance the counter once per emitted chunk so the next
                # record's frame number follows on from the last chunk.
                seq += 1
                yield chunk
        else:
            # NOTE(review): seq is bumped before yielding, so the first
            # message goes out with the initial `seq` value — confirm this
            # matches the receiver's expectations.
            seq += 1
            yield msg
def encode_message(seq, records, encoding):
    """Encodes a complete ASTM message with checksum and framing characters.

    :param seq: Frame sequence number.
    :type seq: int
    :param records: List of ASTM records.
    :type records: list
    :param encoding: Data encoding.
    :type encoding: str
    :return: ASTM complete message with checksum and other control characters.
    :rtype: str
    """
    body = RECORD_SEP.join(encode_record(record, encoding)
                           for record in records)
    # Frame = one-digit sequence number (mod 8) + records + <CR><ETX>.
    frame = b''.join((str(seq % 8).encode(), body, CR, ETX))
    return b''.join([STX, frame, make_checksum(frame), CR, LF])
def encode_record(record, encoding):
    """Encodes single ASTM record.

    :param record: ASTM record. Each :class:`str`-typed item counted as field
                   value, one level nested :class:`list` counted as components
                   and second leveled - as repeated components.
    :type record: list
    :param encoding: Data encoding.
    :type encoding: str
    :returns: Encoded ASTM record.
    :rtype: str
    """
    fields = []
    for field in record:
        if isinstance(field, bytes):
            value = field
        elif isinstance(field, unicode):
            value = field.encode(encoding)
        elif isinstance(field, Iterable):
            value = encode_component(field, encoding)
        elif field is None:
            value = b''
        else:
            # Fall back to the text representation of scalar values.
            value = unicode(field).encode(encoding)
        fields.append(value)
    return FIELD_SEP.join(fields)
def encode_component(component, encoding):
    """Encodes ASTM record field components."""
    parts = []
    for item in component:
        if isinstance(item, bytes):
            parts.append(item)
        elif isinstance(item, unicode):
            parts.append(item.encode(encoding))
        elif isinstance(item, Iterable):
            # A nested iterable means the whole component is repeated.
            return encode_repeated_component(component, encoding)
        elif item is None:
            parts.append(b'')
        else:
            parts.append(unicode(item).encode(encoding))
    # Trailing empty components are dropped from the wire format.
    return COMPONENT_SEP.join(parts).rstrip(COMPONENT_SEP)
def encode_repeated_component(components, encoding):
    """Encodes repeated components."""
    encoded = (encode_component(item, encoding) for item in components)
    return REPEAT_SEP.join(encoded)
def make_checksum(message):
    """Calculates checksum for specified message.

    The checksum is the byte-sum of the message modulo 256, rendered as a
    two-character upper-case hex value.

    :param message: ASTM message.
    :type message: bytes
    :returns: Checksum value that is actually byte sized integer in hex base
    :rtype: bytes
    """
    if isinstance(message[0], int):
        # Already an integer sequence (py3 bytes).
        values = message
    else:
        # Text or py2-style byte string: convert chars to ordinals.
        values = (ord(ch) for ch in message)
    return '{:02X}'.format(sum(values) & 0xFF).encode()
def make_chunks(s, n):
    """Regroup byte string *s* into a list of chunks of at most *n* bytes."""
    # A single shared iterator of 1-byte slices, grouped n at a time; the
    # final group is padded with b'' so the join yields a short last chunk.
    singles = iter([s[i:i + 1] for i in range(len(s))])
    groups = izip_longest(*[singles] * n, fillvalue=b'')
    return [b''.join(group) for group in groups]
def split(msg, size):
    """Split `msg` into chunks with specified `size`.
    Chunk `size` value couldn't be less then 7 since each chunk goes with at
    least 7 special characters: STX, frame number, ETX or ETB, checksum and
    message terminator.
    :param msg: ASTM message.
    :type msg: bytes
    :param size: Chunk size in bytes.
    :type size: int
    :yield: `bytes`
    """
    # Layout: STX(1) + frame digit(1) + payload + 6-byte tail; see the
    # asserts below for the framing invariants being relied on.
    stx, frame, msg, tail = msg[:1], msg[1:2], msg[2:-6], msg[-6:]
    assert stx == STX
    assert frame.isdigit()
    assert tail.endswith(CRLF)
    assert size is not None and size >= 7
    frame = int(frame)
    # Each output chunk carries 7 bytes of framing overhead, so the payload
    # per chunk is size - 7.
    chunks = make_chunks(msg, size - 7)
    chunks, last = chunks[:-1], chunks[-1]
    idx = 0
    # Intermediate chunks are terminated with <ETB>; frame numbers cycle
    # through 0-7 starting from the original message's frame number.
    for idx, chunk in enumerate(chunks):
        item = b''.join([str((idx + frame) % 8).encode(), chunk, ETB])
        yield b''.join([STX, item, make_checksum(item), CRLF])
    # The final chunk is terminated with <CR><ETX> like a complete frame.
    item = b''.join([str((idx + frame + 1) % 8).encode(), last, CR, ETX])
    yield b''.join([STX, item, make_checksum(item), CRLF])
def join(chunks):
    """Merges ASTM message `chunks` into single message.

    :param chunks: List of chunks as `bytes`.
    :type chunks: iterable
    """
    # Strip each chunk's STX + frame digit (front) and the 5-byte tail
    # (ETB/ETX, checksum, CRLF), then re-frame the payload as frame "1".
    payload = b''.join(chunk[2:-5] for chunk in chunks)
    msg = b'1' + payload + ETX
    return b''.join([STX, msg, make_checksum(msg), CRLF])
def is_chunked_message(message):
    """Checks plain message for chunked byte.

    A chunked (intermediate) message has <ETB> exactly 5 bytes before its
    end (followed by the checksum and CRLF).
    """
    if len(message) < 5:
        return False
    position = message.find(ETB)
    if position == -1:
        return False
    return position == len(message) - 5
| [
[
1,
0,
0.0299,
0.003,
0,
0.66,
0,
193,
0,
1,
0,
0,
193,
0,
0
],
[
1,
0,
0.0328,
0.003,
0,
0.66,
0.05,
238,
0,
1,
0,
0,
238,
0,
0
],
[
1,
0,
0.0403,
0.0119,
0,
0.66... | [
"from collections import Iterable",
"from .compat import unicode",
"from .constants import (\n STX, ETX, ETB, CR, LF, CRLF,\n FIELD_SEP, COMPONENT_SEP, RECORD_SEP, REPEAT_SEP, ENCODING\n)",
"try:\n from itertools import izip_longest\nexcept ImportError: # Python 3\n from itertools import zip_long... |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Alexander Shorin
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
import logging
import socket
from .asynclib import Dispatcher, loop
from .codec import decode_message, is_chunked_message, join
from .constants import ACK, CRLF, EOT, NAK, ENCODING
from .exceptions import InvalidState, NotAccepted
from .protocol import ASTMProtocol
# Module-level logger shared by the dispatcher and request handler below.
log = logging.getLogger(__name__)
# Explicit public API of this module.
__all__ = ['BaseRecordsDispatcher', 'RequestHandler', 'Server']
class BaseRecordsDispatcher(object):
    """Abstract dispatcher of received ASTM records by :class:`RequestHandler`.
    You need to override his handlers or extend dispatcher for your needs.
    For instance::
        class Dispatcher(BaseRecordsDispatcher):
            def __init__(self, encoding=None):
                super(Dispatcher, self).__init__(encoding)
                # extend it for your needs
                self.dispatch['M'] = self.my_handler
                # map custom wrappers for ASTM records to their type if you
                # don't like to work with raw data.
                self.wrappers['M'] = MyWrapper
            def on_header(self, record):
                # initialize state for this session
                ...
            def on_patient(self, record):
                # handle patient info
                ...
            # etc handlers
            def my_handler(self, record):
                # handle custom record that wasn't implemented yet by
                # python-astm due to some reasons
                ...
    After defining our dispatcher, we left only to let :class:`Server` use it::
        server = Server(dispatcher=Dispatcher)
    """
    #: Encoding of received messages.
    encoding = ENCODING
    def __init__(self, encoding=None):
        self.encoding = encoding or self.encoding
        # Maps record-type codes to their handler methods; extendable by
        # subclasses (see class docstring).
        self.dispatch = {
            'H': self.on_header,
            'C': self.on_comment,
            'P': self.on_patient,
            'O': self.on_order,
            'R': self.on_result,
            'S': self.on_scientific,
            'M': self.on_manufacturer_info,
            'L': self.on_terminator
        }
        # Optional record-type -> wrapper-class mapping used by wrap().
        self.wrappers = {}
    def __call__(self, message):
        """Decode `message` and route each record to its type handler."""
        seq, records, cs = decode_message(message, self.encoding)
        for record in records:
            self.dispatch.get(record[0], self.on_unknown)(self.wrap(record))
    def wrap(self, record):
        """Wrap a raw record list in its registered wrapper class, if any."""
        rtype = record[0]
        if rtype in self.wrappers:
            return self.wrappers[rtype](*record)
        return record
    def _default_handler(self, record):
        # log.warn() is a deprecated alias; use warning() instead.
        log.warning('Record remains unprocessed: %s', record)
    def on_header(self, record):
        """Header record handler."""
        self._default_handler(record)
    def on_comment(self, record):
        """Comment record handler."""
        self._default_handler(record)
    def on_patient(self, record):
        """Patient record handler."""
        self._default_handler(record)
    def on_order(self, record):
        """Order record handler."""
        self._default_handler(record)
    def on_result(self, record):
        """Result record handler."""
        self._default_handler(record)
    def on_scientific(self, record):
        """Scientific record handler."""
        self._default_handler(record)
    def on_manufacturer_info(self, record):
        """Manufacturer information record handler."""
        self._default_handler(record)
    def on_terminator(self, record):
        """Terminator record handler."""
        self._default_handler(record)
    def on_unknown(self, record):
        """Fallback handler for dispatcher."""
        self._default_handler(record)
class RequestHandler(ASTMProtocol):
    """ASTM protocol request handler.

    :param sock: Socket object.
    :param dispatcher: Request handler records dispatcher instance.
    :type dispatcher: :class:`BaseRecordsDispatcher`
    :param timeout: Number of seconds to wait for incoming data before
                    connection closing.
    :type timeout: int
    """
    def __init__(self, sock, dispatcher, timeout=None):
        super(RequestHandler, self).__init__(sock, timeout=timeout)
        self._chunks = []
        if sock is not None:
            host, port = sock.getpeername()
        else:
            host, port = None, None
        self.client_info = {'host': host, 'port': port}
        self.dispatcher = dispatcher
        self._is_transfer_state = False
        self.terminator = 1

    def on_enq(self):
        """Start a transfer session; reject a nested <ENQ>."""
        if self._is_transfer_state:
            log.error('ENQ is not expected')
            return NAK
        self._is_transfer_state = True
        self.terminator = [CRLF, EOT]
        return ACK

    def on_ack(self):
        raise NotAccepted('Server should not be ACKed.')

    def on_nak(self):
        raise NotAccepted('Server should not be NAKed.')

    def on_eot(self):
        """Finish the transfer session started by <ENQ>."""
        if not self._is_transfer_state:
            raise InvalidState('Server is not ready to accept EOT message.')
        self._is_transfer_state = False
        self.terminator = 1

    def on_message(self):
        """Handle a framed ASTM message, replying <ACK> or <NAK>."""
        if not self._is_transfer_state:
            self.discard_input_buffers()
            return NAK
        try:
            self.handle_message(self._last_recv_data)
        except Exception:
            log.exception('Error occurred on message handling.')
            return NAK
        return ACK

    def handle_message(self, message):
        """Accumulate chunked messages and forward complete ones."""
        self.is_chunked_transfer = is_chunked_message(message)
        if self.is_chunked_transfer or self._chunks:
            self._chunks.append(message)
            if not self.is_chunked_transfer:
                # Final chunk arrived: dispatch the reassembled message.
                self.dispatcher(join(self._chunks))
                self._chunks = []
        else:
            self.dispatcher(message)

    def discard_input_buffers(self):
        self._chunks = []
        return super(RequestHandler, self).discard_input_buffers()

    def on_timeout(self):
        """Closes connection on timeout."""
        super(RequestHandler, self).on_timeout()
        self.close()
class Server(Dispatcher):
    """Asyncore driven ASTM server.

    :param host: Server IP address or hostname.
    :type host: str
    :param port: Server port number.
    :type port: int
    :param request: Custom server request handler. If omitted the
                    :class:`RequestHandler` will be used by default.
    :param dispatcher: Custom request handler records dispatcher. If omitted the
                       :class:`BaseRecordsDispatcher` will be used by default.
    :param timeout: :class:`RequestHandler` connection timeout. If :const:`None`
                    request handler will wait for data before connection
                    closing.
    :type timeout: int
    :param encoding: :class:`Dispatcher <BaseRecordsDispatcher>`\'s encoding.
    :type encoding: str
    """
    request = RequestHandler
    dispatcher = BaseRecordsDispatcher

    def __init__(self, host='localhost', port=15200,
                 request=None, dispatcher=None,
                 timeout=None, encoding=None):
        super(Server, self).__init__()
        self.pool = []
        self.timeout = timeout
        self.encoding = encoding
        # Per-instance overrides shadow the class-level defaults above.
        if request is not None:
            self.request = request
        if dispatcher is not None:
            self.dispatcher = dispatcher
        # Bring the listening socket up last.
        self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
        self.set_reuse_addr()
        self.bind((host, port))
        self.listen(5)

    def handle_accept(self):
        """Spawn a request handler for each accepted connection."""
        pair = self.accept()
        if pair is not None:
            sock, addr = pair
            self.request(sock, self.dispatcher(self.encoding),
                         timeout=self.timeout)
            super(Server, self).handle_accept()

    def serve_forever(self, *args, **kwargs):
        """Enters into the :func:`polling loop <asynclib.loop>` to let server
        handle incoming requests."""
        loop(*args, **kwargs)
| [
[
1,
0,
0.0391,
0.0039,
0,
0.66,
0,
715,
0,
1,
0,
0,
715,
0,
0
],
[
1,
0,
0.043,
0.0039,
0,
0.66,
0.0909,
687,
0,
1,
0,
0,
687,
0,
0
],
[
1,
0,
0.0469,
0.0039,
0,
0... | [
"import logging",
"import socket",
"from .asynclib import Dispatcher, loop",
"from .codec import decode_message, is_chunked_message, join",
"from .constants import ACK, CRLF, EOT, NAK, ENCODING",
"from .exceptions import InvalidState, NotAccepted",
"from .protocol import ASTMProtocol",
"log = logging.... |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Alexander Shorin
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
import logging
from .asynclib import AsyncChat, call_later
from .records import HeaderRecord, TerminatorRecord
from .constants import STX, ENQ, ACK, NAK, EOT, ENCODING
# Module-level logger for protocol-layer diagnostics.
log = logging.getLogger(__name__)
# Explicit public API of this module.
__all__ = ['ASTMProtocol']
class ASTMProtocol(AsyncChat):
    """Common ASTM protocol routines."""
    #: ASTM header record class.
    astm_header = HeaderRecord
    #: ASTM terminator record class.
    astm_terminator = TerminatorRecord
    #: Flag about chunked transfer.
    is_chunked_transfer = None
    #: IO timer
    timer = None
    # Default message encoding.
    encoding = ENCODING
    # Keep terminators in the collected data; the framing is parsed later.
    strip_terminator = False
    # Last raw payloads seen on the wire, for handlers and debugging.
    _last_recv_data = None
    _last_sent_data = None
    def __init__(self, sock=None, map=None, timeout=None):
        super(ASTMProtocol, self).__init__(sock, map)
        if timeout is not None:
            # Fire on_timeout() after `timeout` seconds of inactivity; the
            # timer is reset on every read/push below.
            self.timer = call_later(timeout, self.on_timeout)
    def found_terminator(self):
        """Drain the inbox, dispatching every non-empty data item."""
        while self.inbox:
            data = self.inbox.popleft()
            if not data:
                continue
            self.dispatch(data)
    def dispatch(self, data):
        """Dispatcher of received data."""
        self._last_recv_data = data
        if data == ENQ:
            handler = self.on_enq
        elif data == ACK:
            handler = self.on_ack
        elif data == NAK:
            handler = self.on_nak
        elif data == EOT:
            handler = self.on_eot
        elif data.startswith(STX):  # this looks like a message
            handler = self.on_message
        else:
            handler = lambda: self.default_handler(data)
        resp = handler()
        # Handlers may return a control byte (e.g. ACK/NAK) to send back.
        if resp is not None:
            self.push(resp)
    def default_handler(self, data):
        """Fallback for data that matches no known ASTM token.

        Bug fix: the original passed ``data`` as a second ``ValueError``
        argument instead of interpolating it, so the ``%r`` placeholder was
        never filled in.
        """
        raise ValueError('Unable to dispatch data: %r' % data)
    def push(self, data):
        """Queue `data` for sending, refreshing the inactivity timer."""
        self._last_sent_data = data
        if self.timer is not None and not self.timer.cancelled:
            self.timer.reset()
        return super(ASTMProtocol, self).push(data)
    def on_enq(self):
        """Calls on <ENQ> message receiving."""
    def on_ack(self):
        """Calls on <ACK> message receiving."""
    def on_nak(self):
        """Calls on <NAK> message receiving."""
    def on_eot(self):
        """Calls on <EOT> message receiving."""
    def on_message(self):
        """Calls on ASTM message receiving."""
    def on_timeout(self):
        """Calls when timeout event occurs. Used to limit waiting time for
        response data."""
        log.warning('Communication timeout')
    def handle_read(self):
        # Any inbound traffic counts as activity for the timeout timer.
        if self.timer is not None and not self.timer.cancelled:
            self.timer.reset()
        super(ASTMProtocol, self).handle_read()
    def handle_close(self):
        # Stop the pending timeout callback before tearing down the channel.
        if self.timer is not None and not self.timer.cancelled:
            self.timer.cancel()
        super(ASTMProtocol, self).handle_close()
| [
[
1,
0,
0.0935,
0.0093,
0,
0.66,
0,
715,
0,
1,
0,
0,
715,
0,
0
],
[
1,
0,
0.1028,
0.0093,
0,
0.66,
0.1667,
591,
0,
2,
0,
0,
591,
0,
0
],
[
1,
0,
0.1121,
0.0093,
0,
... | [
"import logging",
"from .asynclib import AsyncChat, call_later",
"from .records import HeaderRecord, TerminatorRecord",
"from .constants import STX, ENQ, ACK, NAK, EOT, ENCODING",
"log = logging.getLogger(__name__)",
"__all__ = ['ASTMProtocol']",
"class ASTMProtocol(AsyncChat):\n \"\"\"Common ASTM p... |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Alexander Shorin
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
class BaseASTMError(Exception):
"""Base ASTM error."""
class InvalidState(BaseASTMError):
"""Should be raised in case of invalid ASTM handler state."""
class NotAccepted(BaseException):
"""Received data is not acceptable."""
class Rejected(BaseASTMError):
"""Should be raised after unsuccessful attempts to send data
(receiver sends with <NAK> reply)."""
| [
[
3,
0,
0.4038,
0.0769,
0,
0.66,
0,
305,
0,
0,
0,
0,
645,
0,
0
],
[
8,
1,
0.4231,
0.0385,
1,
0.46,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
3,
0,
0.5577,
0.0769,
0,
0.66,
... | [
"class BaseASTMError(Exception):\n \"\"\"Base ASTM error.\"\"\"",
" \"\"\"Base ASTM error.\"\"\"",
"class InvalidState(BaseASTMError):\n \"\"\"Should be raised in case of invalid ASTM handler state.\"\"\"",
" \"\"\"Should be raised in case of invalid ASTM handler state.\"\"\"",
"class NotAccepte... |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Alexander Shorin
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
import logging
import socket
from .asynclib import loop
from .codec import encode
from .constants import ENQ, EOT
from .exceptions import NotAccepted
from .mapping import Record
from .protocol import ASTMProtocol
log = logging.getLogger(__name__)
__all__ = ['Client', 'Emitter']
class RecordsStateMachine(object):
"""Simple state machine to track emitting ASTM records in right order.
:param mapping: Mapping of the ASTM records flow order.
Keys should be string and defines record type, while values
expected as sequence of other record types that may be used
after current one.
For example: ``{"H": ["P", "C", "L"]}`` mapping defines that
if previous record had ``"H"`` type, then the next one
should have ``"P"``, ``"C"`` or ``"L"`` type or
:exc:`AssertionError` will be raised. The default mapping
reflects common ASTM records flow rules. If this argument
specified as :const:`None` no rules will be applied.
:type: dict
"""
def __init__(self, mapping):
self.mapping = mapping
self.state = None
def __call__(self, state):
if state is not None:
assert self.is_acceptable(state),\
'invalid state %r, expected one of: %r' \
% (state, self.mapping[self.state])
self.state = state
def is_acceptable(self, state):
if self.mapping is None:
return True
if state not in self.mapping:
return False
next_types = self.mapping[self.state]
return '*' in next_types or state in next_types
DEFAULT_RECORDS_FLOW_MAP = {
None: ['H'],
'H': ['C', 'M', 'P', 'L'],
'P': ['C', 'M', 'O', 'L'],
'O': ['C', 'M', 'P', 'O', 'R', 'L'],
'R': ['C', 'M', 'P', 'O', 'R', 'S', 'L'],
'S': ['C', 'M', 'P', 'O', 'R', 'S', 'L'],
'C': ['*'],
'M': ['*'],
'L': ['H']
}
class Emitter(object):
"""ASTM records emitter for :class:`Client`.
Used as wrapper for user provided one to provide proper routines around for
sending Header and Terminator records.
:param emitter: Generator/coroutine.
:param encoding: Data encoding.
:type encoding: str
:param flow_map: Records flow map. Used by :class:`RecordsStateMachine`.
:type: dict
:param chunk_size: Chunk size in bytes. If :const:`None`, emitter record
wouldn't be split into chunks.
:type chunk_size: int
:param bulk_mode: Sends all records for single session (starts from Header
and ends with Terminator records) via single message
instead of sending each record separately. If result
message is too long, it may be split by chunks if
`chunk_size` is not :const:`None`. Keep in mind, that
collecting all records for single session may take some
time and server may reject data by timeout reason.
:type bulk_mode: bool
"""
#: Records state machine controls emitting records in right order. It
#: receives `records_flow_map` as only argument on Emitter initialization.
state_machine = RecordsStateMachine
def __init__(self, emitter, flow_map, encoding,
chunk_size=None, bulk_mode=False):
self._emitter = emitter()
self._is_active = False
self.encoding = encoding
self.records_sm = self.state_machine(flow_map)
# flag to signal that user's emitter produces no records
self.empty = False
# last sent sequence number
self.last_seq = 0
self.buffer = []
self.chunk_size = chunk_size
self.bulk_mode = bulk_mode
def _get_record(self, value=None):
record = self._emitter.send(value if self._is_active else None)
if not self._is_active:
self._is_active = True
if isinstance(record, Record):
record = record.to_astm()
try:
self.records_sm(record[0])
except Exception as err:
self.throw(type(err), err.args)
return record
def _send_record(self, record):
if self.bulk_mode:
records = [record]
while True:
record = self._get_record(True)
records.append(record)
if record[0] == 'L':
break
chunks = encode(records, self.encoding, self.chunk_size)
else:
self.last_seq += 1
chunks = encode([record], self.encoding,
self.chunk_size, self.last_seq)
self.buffer.extend(chunks)
data = self.buffer.pop(0)
self.last_seq += len(self.buffer)
if record[0] == 'L':
self.last_seq = 0
self.buffer.append(EOT)
return data
def send(self, value=None):
"""Passes `value` to the emitter. Semantically acts in same way as
:meth:`send` for generators.
If the emitter has any value within local `buffer` the returned value
will be extracted from it unless `value` is :const:`False`.
:param value: Callback value. :const:`True` indicates that previous
record was successfully received and accepted by server,
:const:`False` signs about his rejection.
:type value: bool
:return: Next record data to send to server.
:rtype: bytes
"""
if self.buffer and value:
return self.buffer.pop(0)
record = self._get_record(value)
return self._send_record(record)
def throw(self, exc_type, exc_val=None, exc_tb=None):
"""Raises exception inside the emitter. Acts in same way as
:meth:`throw` for generators.
If the emitter had catch an exception and return any record value, it
will be proceeded in common way.
"""
record = self._emitter.throw(exc_type, exc_val, exc_tb)
if record is not None:
return self._send_record(record)
def close(self):
"""Closes the emitter. Acts in same way as :meth:`close` for generators.
"""
self._emitter.close()
class Client(ASTMProtocol):
"""Common ASTM client implementation.
:param emitter: Generator function that will produce ASTM records.
:type emitter: function
:param host: Server IP address or hostname.
:type host: str
:param port: Server port number.
:type port: int
:param timeout: Time to wait for response from server. If response wasn't
received, the :meth:`on_timeout` will be called.
If :const:`None` this timer will be disabled.
:type timeout: int
:param flow_map: Records flow map. Used by :class:`RecordsStateMachine`.
:type: dict
:param chunk_size: Chunk size in bytes. :const:`None` value prevents
records chunking.
:type chunk_size: int
:param bulk_mode: Sends all records for single session (starts from Header
and ends with Terminator records) via single message
instead of sending each record separately. If result
message is too long, it may be split by chunks if
`chunk_size` is not :const:`None`. Keep in mind, that
collecting all records for single session may take some
time and server may reject data by timeout reason.
:type bulk_mode: bool
Base `emitter` is a generator that yield ASTM records one by one preserving
their order::
from astm.records import (
HeaderRecord, PatientRecord, OrderRecord, TerminatorRecord
)
def emitter():
assert (yield HeaderRecord()), 'header was rejected'
ok = yield PatientRecord(name={'last': 'foo', 'first': 'bar'})
if ok: # you also can decide what to do in case of record rejection
assert (yield OrderRecord())
yield TerminatorRecord() # we may do not care about rejection
:class:`Client` thought :class:`RecordsStateMachine` keep track
on this order, raising :exc:`AssertionError` if it is broken.
When `emitter` terminates with :exc:`StopIteration` or :exc:`GeneratorExit`
exception client connection to server closing too. You may provide endless
`emitter` by wrapping function body with ``while True: ...`` loop polling
data from source from time to time. Note, that server may have communication
timeouts control and may close session after some time of inactivity, so
be sure that you're able to send whole session (started by Header record and
ended by Terminator one) within limited time frame (commonly 10-15 sec.).
"""
#: Wrapper of emitter to provide session context and system logic about
#: sending head and tail data.
emitter_wrapper = Emitter
def __init__(self, emitter, host='localhost', port=15200,
encoding=None, timeout=20, flow_map=DEFAULT_RECORDS_FLOW_MAP,
chunk_size=None, bulk_mode=False):
super(Client, self).__init__(timeout=timeout)
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.connect((host, port))
self.emitter = self.emitter_wrapper(
emitter,
encoding=encoding or self.encoding,
flow_map=flow_map,
chunk_size=chunk_size,
bulk_mode=bulk_mode
)
self.terminator = 1
def handle_connect(self):
"""Initiates ASTM communication session."""
super(Client, self).handle_connect()
self._open_session()
def handle_close(self):
self.emitter.close()
super(Client, self).handle_close()
def _open_session(self):
self.push(ENQ)
def _close_session(self, close_connection=False):
self.push(EOT)
if close_connection:
self.close_when_done()
def run(self, timeout=1.0, *args, **kwargs):
"""Enters into the :func:`polling loop <astm.asynclib.loop>` to let
client send outgoing requests."""
loop(timeout, *args, **kwargs)
def on_enq(self):
"""Raises :class:`NotAccepted` exception."""
raise NotAccepted('Client should not receive ENQ.')
def on_ack(self):
"""Handles ACK response from server.
Provides callback value :const:`True` to the emitter and sends next
message to server.
"""
try:
message = self.emitter.send(True)
except StopIteration:
self._close_session(True)
else:
self.push(message)
if message == EOT:
self._open_session()
def on_nak(self):
"""Handles NAK response from server.
If it was received on ENQ request, the client tries to repeat last
request for allowed amount of attempts. For others it send callback
value :const:`False` to the emitter."""
if self._last_sent_data == ENQ:
return self.push(ENQ)
try:
message = self.emitter.send(False)
except StopIteration:
self._close_session(True)
except Exception:
self._close_session(True)
raise
else:
self.push(message)
if message == EOT:
self._open_session()
def on_eot(self):
"""Raises :class:`NotAccepted` exception."""
raise NotAccepted('Client should not receive EOT.')
def on_message(self):
"""Raises :class:`NotAccepted` exception."""
raise NotAccepted('Client should not receive ASTM message.')
def on_timeout(self):
"""Sends final EOT message and closes connection after his receiving."""
super(Client, self).on_timeout()
self._close_session(True)
| [
[
1,
0,
0.0292,
0.0029,
0,
0.66,
0,
715,
0,
1,
0,
0,
715,
0,
0
],
[
1,
0,
0.0321,
0.0029,
0,
0.66,
0.0769,
687,
0,
1,
0,
0,
687,
0,
0
],
[
1,
0,
0.035,
0.0029,
0,
0... | [
"import logging",
"import socket",
"from .asynclib import loop",
"from .codec import encode",
"from .constants import ENQ, EOT",
"from .exceptions import NotAccepted",
"from .mapping import Record",
"from .protocol import ASTMProtocol",
"log = logging.getLogger(__name__)",
"__all__ = ['Client', 'E... |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Alexander Shorin
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
import datetime
import decimal
import inspect
import time
import warnings
from operator import itemgetter
from itertools import islice
try:
from itertools import izip_longest
except ImportError: # Python 3
from itertools import zip_longest as izip_longest
from .compat import basestring, unicode, long
def make_string(value):
if isinstance(value, unicode):
return value
elif isinstance(value, bytes):
return unicode(value, 'utf-8')
else:
return unicode(value)
class Field(object):
"""Base mapping field class."""
def __init__(self, name=None, default=None, required=False, length=None):
self.name = name
self.default = default
self.required = required
self.length = length
def __get__(self, instance, owner):
if instance is None:
return self
value = instance._data.get(self.name)
if value is not None:
value = self._get_value(value)
elif self.default is not None:
default = self.default
if hasattr(default, '__call__'):
default = default()
value = default
return value
def __set__(self, instance, value):
if value is not None:
value = self._set_value(value)
instance._data[self.name] = value
def _get_value(self, value):
return value
def _set_value(self, value):
value = make_string(value)
if self.length is not None and len(value) > self.length:
raise ValueError('Field %r value is too long (max %d, got %d)'
'' % (self.name, self.length, len(value)))
return value
class MetaMapping(type):
def __new__(mcs, name, bases, d):
fields = []
names = []
def merge_fields(items):
for name, field in items:
if field.name is None:
field.name = name
if name not in names:
fields.append((name, field))
names.append(name)
else:
fields[names.index(name)] = (name, field)
for base in bases:
if hasattr(base, '_fields'):
merge_fields(base._fields)
merge_fields([(k, v) for k, v in d.items() if isinstance(v, Field)])
if '_fields' not in d:
d['_fields'] = fields
else:
merge_fields(d['_fields'])
d['_fields'] = fields
return super(MetaMapping, mcs).__new__(mcs, name, bases, d)
_MappingProxy = MetaMapping('_MappingProxy', (object,), {}) # Python 3 workaround
class Mapping(_MappingProxy):
def __init__(self, *args, **kwargs):
fieldnames = map(itemgetter(0), self._fields)
values = dict(izip_longest(fieldnames, args))
values.update(kwargs)
self._data = {}
for attrname, field in self._fields:
attrval = values.pop(attrname, None)
if attrval is None:
setattr(self, attrname, getattr(self, attrname))
else:
setattr(self, attrname, attrval)
if values:
raise ValueError('Unexpected kwargs found: %r' % values)
@classmethod
def build(cls, *a):
fields = []
newcls = type('Generic' + cls.__name__, (cls,), {})
for field in a:
if field.name is None:
raise ValueError('Name is required for ordered fields.')
setattr(newcls, field.name, field)
fields.append((field.name, field))
newcls._fields = fields
return newcls
def __getitem__(self, key):
return self.values()[key]
def __setitem__(self, key, value):
setattr(self, self._fields[key][0], value)
def __delitem__(self, key):
self._data[self._fields[key][0]] = None
def __iter__(self):
return iter(self.values())
def __contains__(self, item):
return item in self.values()
def __len__(self):
return len(self._data)
def __eq__(self, other):
if len(self) != len(other):
return False
for key, value in zip(self.keys(), other):
if getattr(self, key) != value:
return False
return True
def __ne__(self, other):
return not (self == other)
def __repr__(self):
return '%s(%s)' % (self.__class__.__name__,
', '.join('%s=%r' % (key, value)
for key, value in self.items()))
def keys(self):
return [key for key, field in self._fields]
def values(self):
return [getattr(self, key) for key in self.keys()]
def items(self):
return [(key, getattr(self, key)) for key, field in self._fields]
def to_astm(self):
def values(obj):
for key, field in obj._fields:
value = obj._data[key]
if isinstance(value, Mapping):
yield list(values(value))
elif isinstance(value, list):
stack = []
for item in value:
if isinstance(item, Mapping):
stack.append(list(values(item)))
else:
stack.append(item)
yield stack
elif value is None and field.required:
raise ValueError('Field %r value should not be None' % key)
else:
yield value
return list(values(self))
class Record(Mapping):
"""ASTM record mapping class."""
class Component(Mapping):
"""ASTM component mapping class."""
class TextField(Field):
"""Mapping field for string values."""
def _set_value(self, value):
if not isinstance(value, basestring):
raise TypeError('String value expected, got %r' % value)
return super(TextField, self)._set_value(value)
class ConstantField(Field):
"""Mapping field for constant values.
>>> class Record(Mapping):
... type = ConstantField(default='S')
>>> rec = Record()
>>> rec.type
'S'
>>> rec.type = 'W'
Traceback (most recent call last):
...
ValueError: Field changing not allowed
"""
def __init__(self, name=None, default=None, field=Field()):
super(ConstantField, self).__init__(name, default, True, None)
self.field = field
self.required = True
if self.default is None:
raise ValueError('Constant value should be defined')
def _get_value(self, value):
return self.default
def _set_value(self, value):
value = self.field._get_value(value)
if self.default != value:
raise ValueError('Field changing not allowed: got %r, accepts %r'
'' % (value, self.default))
return super(ConstantField, self)._set_value(value)
class IntegerField(Field):
"""Mapping field for integer values."""
def _get_value(self, value):
return int(value)
def _set_value(self, value):
if not isinstance(value, (int, long)):
try:
value = self._get_value(value)
except Exception:
raise TypeError('Integer value expected, got %r' % value)
return super(IntegerField, self)._set_value(value)
class DecimalField(Field):
"""Mapping field for decimal values."""
def _get_value(self, value):
return decimal.Decimal(value)
def _set_value(self, value):
if not isinstance(value, (int, long, float, decimal.Decimal)):
raise TypeError('Decimal value expected, got %r' % value)
return super(DecimalField, self)._set_value(value)
class DateField(Field):
"""Mapping field for storing date/time values."""
format = '%Y%m%d'
def _get_value(self, value):
return datetime.datetime.strptime(value, self.format)
def _set_value(self, value):
if isinstance(value, basestring):
value = self._get_value(value)
if not isinstance(value, (datetime.datetime, datetime.date)):
raise TypeError('Datetime value expected, got %r' % value)
return value.strftime(self.format)
class TimeField(Field):
"""Mapping field for storing times."""
format = '%H%M%S'
def _get_value(self, value):
if isinstance(value, basestring):
try:
value = value.split('.', 1)[0] # strip out microseconds
value = datetime.time(*time.strptime(value, self.format)[3:6])
except ValueError:
raise ValueError('Value %r does not match format %s'
'' % (value, self.format))
return value
def _set_value(self, value):
if isinstance(value, basestring):
value = self._get_value(value)
if not isinstance(value, (datetime.datetime, datetime.time)):
raise TypeError('Datetime value expected, got %r' % value)
if isinstance(value, datetime.datetime):
value = value.time()
return value.replace(microsecond=0).strftime(self.format)
class DateTimeField(Field):
"""Mapping field for storing date/time values."""
format = '%Y%m%d%H%M%S'
def _get_value(self, value):
return datetime.datetime.strptime(value, self.format)
def _set_value(self, value):
if isinstance(value, basestring):
value = self._get_value(value)
if not isinstance(value, (datetime.datetime, datetime.date)):
raise TypeError('Datetime value expected, got %r' % value)
return value.strftime(self.format)
class SetField(Field):
"""Mapping field for predefined set of values."""
def __init__(self, name=None, default=None,
required=False, length=None,
values=None, field=Field()):
super(SetField, self).__init__(name, default, required, length)
self.field = field
self.values = values and set(values) or set([])
def _get_value(self, value):
return self.field._get_value(value)
def _set_value(self, value):
value = self.field._get_value(value)
if value not in self.values:
raise ValueError('Unexpectable value %r' % value)
return self.field._set_value(value)
class ComponentField(Field):
"""Mapping field for storing record component."""
def __init__(self, mapping, name=None, default=None):
self.mapping = mapping
default = default or mapping()
super(ComponentField, self).__init__(name, default)
def _get_value(self, value):
if isinstance(value, dict):
return self.mapping(**value)
elif isinstance(value, self.mapping):
return value
else:
return self.mapping(*value)
def _set_value(self, value):
if isinstance(value, dict):
return self.mapping(**value)
elif isinstance(value, self.mapping):
return value
if isinstance(value, basestring):
value = [value]
return self.mapping(*value)
class RepeatedComponentField(Field):
"""Mapping field for storing list of record components."""
def __init__(self, field, name=None, default=None):
if isinstance(field, ComponentField):
self.field = field
else:
assert isinstance(field, type) and issubclass(field, Mapping)
self.field = ComponentField(field)
default = default or []
super(RepeatedComponentField, self).__init__(name, default)
class Proxy(list):
def __init__(self, seq, field):
list.__init__(self, seq)
self.list = seq
self.field = field
def _to_list(self):
return [list(self.field._get_value(item)) for item in self.list]
def __add__(self, other):
obj = type(self)(self.list, self.field)
obj.extend(other)
return obj
def __iadd__(self, other):
self.extend(other)
return self
def __mul__(self, other):
return type(self)(self.list * other, self.field)
def __imul__(self, other):
self.list *= other
return self
def __lt__(self, other):
return self._to_list() < other
def __le__(self, other):
return self._to_list() <= other
def __eq__(self, other):
return self._to_list() == other
def __ne__(self, other):
return self._to_list() != other
def __ge__(self, other):
return self._to_list() >= other
def __gt__(self, other):
return self._to_list() > other
def __repr__(self):
return '<ListProxy %s %r>' % (self.list, list(self))
def __str__(self):
return str(self.list)
def __unicode__(self):
return unicode(self.list)
def __delitem__(self, index):
del self.list[index]
def __getitem__(self, index):
return self.field._get_value(self.list[index])
def __setitem__(self, index, value):
self.list[index] = self.field._set_value(value)
def __delslice__(self, i, j):
del self.list[i:j]
def __getslice__(self, i, j):
return self.__class__(self.list[i:j], self.field)
def __setslice__(self, i, j, seq):
self.list[i:j] = [self.field._set_value(v) for v in seq]
def __contains__(self, value):
for item in self:
if item == value:
return True
return False
def __iter__(self):
for index in range(len(self)):
yield self[index]
def __len__(self):
return len(self.list)
def __nonzero__(self):
return bool(self.list)
def __reduce__(self):
return self.list.__reduce__()
def __reduce_ex__(self, *args, **kwargs):
return self.list.__reduce_ex__(*args, **kwargs)
def append(self, item):
self.list.append(self.field._set_value(item))
def count(self, value):
return self._to_list().count(value)
def extend(self, other):
self.list.extend([self.field._set_value(i) for i in other])
def index(self, value, start=None, stop=None):
start = start or 0
for idx, item in enumerate(islice(self, start, stop)):
if item == value:
return idx + start
else:
raise ValueError('%r not in list' % value)
def insert(self, index, object):
self.list.insert(index, self.field._set_value(object))
def remove(self, value):
for item in self:
if item == value:
return self.list.remove(value)
raise ValueError('Value %r not in list' % value)
def pop(self, index=-1):
return self.field._get_value(self.list.pop(index))
def sort(self, cmp=None, key=None, reverse=False):
raise NotImplementedError('In place sorting not allowed.')
# update docstrings from list
for name, obj in inspect.getmembers(Proxy):
if getattr(list, name, None) is None\
or name in ['__module__', '__doc__']:
continue
if not inspect.isfunction(obj):
continue
obj.__doc__ = getattr(list, name).__doc__
del name, obj
def _get_value(self, value):
return self.Proxy(value, self.field)
def _set_value(self, value):
return [self.field._set_value(item) for item in value]
class NotUsedField(Field):
"""Mapping field for value that should be used. Acts as placeholder.
On attempt to assign something to it raises :exc:`UserWarning` and rejects
assigned value."""
def __init__(self, name=None):
super(NotUsedField, self).__init__(name)
def _get_value(self, value):
return None
def _set_value(self, value):
warnings.warn('Field %r is not used, any assignments are omitted'
'' % self.name, UserWarning)
return None
| [
[
1,
0,
0.0191,
0.0019,
0,
0.66,
0,
426,
0,
1,
0,
0,
426,
0,
0
],
[
1,
0,
0.021,
0.0019,
0,
0.66,
0.04,
349,
0,
1,
0,
0,
349,
0,
0
],
[
1,
0,
0.0229,
0.0019,
0,
0.6... | [
"import datetime",
"import decimal",
"import inspect",
"import time",
"import warnings",
"from operator import itemgetter",
"from itertools import islice",
"try:\n from itertools import izip_longest\nexcept ImportError: # Python 3\n from itertools import zip_longest as izip_longest\n from .co... |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Alexander Shorin
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
__version_info__ = (0, 6, 0, 'dev', 0)
__version__ = '{version}{tag}{build}'.format(
version='.'.join(map(str, __version_info__[:3])),
tag='-' + __version_info__[3] if __version_info__[3] else '',
build='.' + str(__version_info__[4]) if __version_info__[4] else ''
)
| [
[
14,
0,
0.6667,
0.0667,
0,
0.66,
0,
218,
0,
0,
0,
0,
0,
8,
0
],
[
14,
0,
0.8667,
0.3333,
0,
0.66,
1,
162,
3,
3,
0,
0,
293,
10,
4
]
] | [
"__version_info__ = (0, 6, 0, 'dev', 0)",
"__version__ = '{version}{tag}{build}'.format(\n version='.'.join(map(str, __version_info__[:3])),\n tag='-' + __version_info__[3] if __version_info__[3] else '',\n build='.' + str(__version_info__[4]) if __version_info__[4] else ''\n)"
] |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Alexander Shorin
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
class DummyMixIn(object):
_input_buffer = ''
addr = ('localhost', 15200)
def flush(self):
pass
def close(self):
pass
class CallLogger(object):
def __init__(self, func):
self.func = func
self.was_called = False
def __call__(self, *args, **kwargs):
self.was_called = True
return self.func(*args, **kwargs)
def track_call(func):
return CallLogger(func)
| [
[
3,
0,
0.4412,
0.2647,
0,
0.66,
0,
587,
0,
2,
0,
0,
186,
0,
0
],
[
14,
1,
0.3529,
0.0294,
1,
0.79,
0,
808,
1,
0,
0,
0,
0,
3,
0
],
[
14,
1,
0.3824,
0.0294,
1,
0.79,... | [
"class DummyMixIn(object):\n _input_buffer = ''\n addr = ('localhost', 15200)\n\n def flush(self):\n pass\n\n def close(self):",
" _input_buffer = ''",
" addr = ('localhost', 15200)",
" def flush(self):\n pass",
" def close(self):\n pass",
"class CallLogger(obj... |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Alexander Shorin
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
import os
import sys
import unittest
def suite():
suite = unittest.TestSuite()
for root, dirs, files in os.walk('.'):
for file in files:
if not (file.startswith('test_') and file.endswith('.py')):
continue
name = file.split('.')[0]
modname = os.path.join(root, name).replace(os.path.sep, '.')
modname = modname.lstrip('.')
tests = unittest.defaultTestLoader.loadTestsFromName(modname)
for test in tests:
suite.addTests(test)
sys.stdout.write('%s : %s tests%s'
% (modname, tests.countTestCases(), os.linesep))
sys.stdout.flush()
return suite
if __name__ == '__main__':
unittest.main(defaultTest='suite')
| [
[
1,
0,
0.303,
0.0303,
0,
0.66,
0,
688,
0,
1,
0,
0,
688,
0,
0
],
[
1,
0,
0.3333,
0.0303,
0,
0.66,
0.25,
509,
0,
1,
0,
0,
509,
0,
0
],
[
1,
0,
0.3636,
0.0303,
0,
0.6... | [
"import os",
"import sys",
"import unittest",
"def suite():\n suite = unittest.TestSuite()\n for root, dirs, files in os.walk('.'):\n for file in files:\n if not (file.startswith('test_') and file.endswith('.py')):\n continue\n name = file.split('.')[0]\n ... |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Alexander Shorin
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
"""Common ASTM records structure.
This module contains base ASTM records mappings with only defined common
required fields for most implementations. Others are marked as
:class:`~astm.mapping.NotUsedField` and should be defined explicitly for your
ASTM realisation.
"""
from datetime import datetime
from .mapping import (
Record, ConstantField, DateTimeField, IntegerField, NotUsedField,
TextField, RepeatedComponentField, Component
)
__all__ = ['HeaderRecord', 'PatientRecord', 'OrderRecord',
'ResultRecord', 'CommentRecord', 'TerminatorRecord']
#: +-----+--------------+---------------------------------+-------------------+
#: | # | ASTM Field # | ASTM Name | Python alias |
#: +=====+==============+=================================+===================+
#: | 1 | 7.1.1 | ASTM Record Type ID | type |
#: +-----+--------------+---------------------------------+-------------------+
#: | 2 | 7.1.2 | Delimiter Definition | delimeter |
#: +-----+--------------+---------------------------------+-------------------+
#: | 3 | 7.1.3 | Message Control ID | message_id |
#: +-----+--------------+---------------------------------+-------------------+
#: | 4 | 7.1.4 | Access Password | password |
#: +-----+--------------+---------------------------------+-------------------+
#: | 5 | 7.1.5 | Sender Name or ID | sender |
#: +-----+--------------+---------------------------------+-------------------+
#: | 6 | 7.1.6 | Sender Street Address | address |
#: +-----+--------------+---------------------------------+-------------------+
#: | 7 | 7.1.7 | Reserved Field | reserved |
#: +-----+--------------+---------------------------------+-------------------+
#: | 8 | 7.1.8 | Sender Telephone Number | phone |
#: +-----+--------------+---------------------------------+-------------------+
#: | 9 | 7.1.9 | Characteristics of Sender | caps |
#: +-----+--------------+---------------------------------+-------------------+
#: | 10 | 7.1.10 | Receiver ID | receiver |
#: +-----+--------------+---------------------------------+-------------------+
#: | 11 | 7.1.11 | Comments | comments |
#: +-----+--------------+---------------------------------+-------------------+
#: | 12 | 7.1.12 | Processing ID | processing_id |
#: +-----+--------------+---------------------------------+-------------------+
#: | 13 | 7.1.13 | Version Number | version |
#: +-----+--------------+---------------------------------+-------------------+
#: | 14 | 7.1.14 | Date/Time of Message | timestamp |
#: +-----+--------------+---------------------------------+-------------------+
#:
# Base message header record: only ``type``, ``delimeter``,
# ``processing_id`` and ``timestamp`` carry values here; everything else
# is left for concrete implementations to define.
HeaderRecord = Record.build(
    ConstantField(name='type', default='H'),
    # NOTE: the alias is intentionally spelled ``delimeter`` -- it must
    # match the Python alias column of the field table above.
    RepeatedComponentField(Component.build(
        ConstantField(name='_', default=''),
        TextField(name='__')
    ), name='delimeter', default=[[], ['', '&']]),
    # ^^^ workaround to define field:
    # ConstantField(name='delimeter', default='\^&'),
    NotUsedField(name='message_id'),
    NotUsedField(name='password'),
    NotUsedField(name='sender'),
    NotUsedField(name='address'),
    NotUsedField(name='reserved'),
    NotUsedField(name='phone'),
    NotUsedField(name='caps'),
    NotUsedField(name='receiver'),
    NotUsedField(name='comments'),
    ConstantField(name='processing_id', default='P'),
    NotUsedField(name='version'),
    # The default is the callable ``datetime.now`` (not a frozen value);
    # presumably evaluated when the record is instantiated -- TODO
    # confirm in astm.mapping.DateTimeField.
    DateTimeField(name='timestamp', default=datetime.now, required=True),
)
#: +-----+--------------+---------------------------------+-------------------+
#: | # | ASTM Field # | ASTM Name | Python alias |
#: +=====+==============+=================================+===================+
#: | 1 | 8.1.1 | Record Type ID | type |
#: +-----+--------------+---------------------------------+-------------------+
#: | 2 | 8.1.2 | Sequence Number | seq |
#: +-----+--------------+---------------------------------+-------------------+
#: | 3 | 8.1.3 | Practice Assigned Patient ID | practice_id |
#: +-----+--------------+---------------------------------+-------------------+
#: | 4 | 8.1.4 | Laboratory Assigned Patient ID | laboratory_id |
#: +-----+--------------+---------------------------------+-------------------+
#: | 5 | 8.1.5 | Patient ID | id |
#: +-----+--------------+---------------------------------+-------------------+
#: | 6 | 8.1.6 | Patient Name | name |
#: +-----+--------------+---------------------------------+-------------------+
#: | 7 | 8.1.7 | Mother’s Maiden Name | maiden_name |
#: +-----+--------------+---------------------------------+-------------------+
#: | 8 | 8.1.8 | Birthdate | birthdate |
#: +-----+--------------+---------------------------------+-------------------+
#: | 9 | 8.1.9 | Patient Sex | sex |
#: +-----+--------------+---------------------------------+-------------------+
#: | 10 | 8.1.10 | Patient Race-Ethnic Origin | race |
#: +-----+--------------+---------------------------------+-------------------+
#: | 11 | 8.1.11 | Patient Address | address |
#: +-----+--------------+---------------------------------+-------------------+
#: | 12 | 8.1.12 | Reserved Field | reserved |
#: +-----+--------------+---------------------------------+-------------------+
#: | 13 | 8.1.13 | Patient Telephone Number | phone |
#: +-----+--------------+---------------------------------+-------------------+
#: | 14 | 8.1.14 | Attending Physician ID | physician_id |
#: +-----+--------------+---------------------------------+-------------------+
#: | 15 | 8.1.15 | Special Field #1 | special_1 |
#: +-----+--------------+---------------------------------+-------------------+
#: | 16 | 8.1.16 | Special Field #2 | special_2 |
#: +-----+--------------+---------------------------------+-------------------+
#: | 17 | 8.1.17 | Patient Height | height |
#: +-----+--------------+---------------------------------+-------------------+
#: | 18 | 8.1.18 | Patient Weight | weight |
#: +-----+--------------+---------------------------------+-------------------+
#: | 19 | 8.1.19 | Patient’s Known Diagnosis | diagnosis |
#: +-----+--------------+---------------------------------+-------------------+
#: | 20 | 8.1.20 | Patient’s Active Medication | medication |
#: +-----+--------------+---------------------------------+-------------------+
#: | 21 | 8.1.21 | Patient’s Diet | diet |
#: +-----+--------------+---------------------------------+-------------------+
#: | 22 | 8.1.22 | Practice Field No. 1 | practice_field_1 |
#: +-----+--------------+---------------------------------+-------------------+
#: | 23 | 8.1.23 | Practice Field No. 2 | practice_field_2 |
#: +-----+--------------+---------------------------------+-------------------+
#: | 24 | 8.1.24 | Admission/Discharge Dates | admission_date |
#: +-----+--------------+---------------------------------+-------------------+
#: | 25 | 8.1.25 | Admission Status | admission_status |
#: +-----+--------------+---------------------------------+-------------------+
#: | 26 | 8.1.26 | Location | location |
#: +-----+--------------+---------------------------------+-------------------+
#:
# Base patient record: only ``type`` and ``seq`` carry values; all other
# fields are placeholders for concrete implementations.
PatientRecord = Record.build(
    ConstantField(name='type', default='P'),
    IntegerField(name='seq', default=1, required=True),
    NotUsedField(name='practice_id'),
    NotUsedField(name='laboratory_id'),
    NotUsedField(name='id'),
    NotUsedField(name='name'),
    NotUsedField(name='maiden_name'),
    NotUsedField(name='birthdate'),
    NotUsedField(name='sex'),
    NotUsedField(name='race'),
    NotUsedField(name='address'),
    NotUsedField(name='reserved'),
    NotUsedField(name='phone'),
    NotUsedField(name='physician_id'),
    NotUsedField(name='special_1'),
    NotUsedField(name='special_2'),
    NotUsedField(name='height'),
    NotUsedField(name='weight'),
    NotUsedField(name='diagnosis'),
    NotUsedField(name='medication'),
    NotUsedField(name='diet'),
    NotUsedField(name='practice_field_1'),
    NotUsedField(name='practice_field_2'),
    NotUsedField(name='admission_date'),
    NotUsedField(name='admission_status'),
    NotUsedField(name='location'),
    # NOTE(review): the fields below (positions 27-35) are not listed in
    # the field table above, which documents only fields 1-26.
    NotUsedField(name='diagnostic_code_nature'),
    NotUsedField(name='diagnostic_code'),
    NotUsedField(name='religion'),
    # sic: 'martial_status' looks like a typo for 'marital_status'; kept
    # as-is because the alias is part of the public record interface.
    NotUsedField(name='martial_status'),
    NotUsedField(name='isolation_status'),
    NotUsedField(name='language'),
    NotUsedField(name='hospital_service'),
    NotUsedField(name='hospital_institution'),
    NotUsedField(name='dosage_category'),
)
#: +-----+--------------+--------------------------------+--------------------+
#: | # | ASTM Field # | ASTM Name | Python alias |
#: +=====+==============+================================+====================+
#: | 1 | 9.4.1 | Record Type ID | type |
#: +-----+--------------+--------------------------------+--------------------+
#: | 2 | 9.4.2 | Sequence Number | seq |
#: +-----+--------------+--------------------------------+--------------------+
#: | 3 | 9.4.3 | Specimen ID | sample_id |
#: +-----+--------------+--------------------------------+--------------------+
#: | 4 | 9.4.4 | Instrument Specimen ID | instrument |
#: +-----+--------------+--------------------------------+--------------------+
#: | 5 | 9.4.5 | Universal Test ID | test |
#: +-----+--------------+--------------------------------+--------------------+
#: | 6 | 9.4.6 | Priority | priority |
#: +-----+--------------+--------------------------------+--------------------+
#: | 7 | 9.4.7 | Requested/Ordered Date/Time | created_at |
#: +-----+--------------+--------------------------------+--------------------+
#: | 8 | 9.4.8 | Specimen Collection Date/Time | sampled_at |
#: +-----+--------------+--------------------------------+--------------------+
#: | 9 | 9.4.9 | Collection End Time | collected_at |
#: +-----+--------------+--------------------------------+--------------------+
#: | 10 | 9.4.10 | Collection Volume | volume |
#: +-----+--------------+--------------------------------+--------------------+
#: | 11 | 9.4.11 | Collector ID | collector |
#: +-----+--------------+--------------------------------+--------------------+
#: | 12 | 9.4.12 | Action Code | action_code |
#: +-----+--------------+--------------------------------+--------------------+
#: | 13 | 9.4.13 | Danger Code | danger_code |
#: +-----+--------------+--------------------------------+--------------------+
#: | 14 | 9.4.14 | Relevant Information | clinical_info |
#: +-----+--------------+--------------------------------+--------------------+
#: | 15 | 9.4.15 | Date/Time Specimen Received | delivered_at |
#: +-----+--------------+--------------------------------+--------------------+
#: | 16 | 9.4.16 | Specimen Descriptor | biomaterial |
#: +-----+--------------+--------------------------------+--------------------+
#: | 17 | 9.4.17 | Ordering Physician | physician |
#: +-----+--------------+--------------------------------+--------------------+
#: | 18 | 9.4.18 | Physician’s Telephone # | physician_phone |
#: +-----+--------------+--------------------------------+--------------------+
#: | 19 | 9.4.19 | User Field No. 1 | user_field_1 |
#: +-----+--------------+--------------------------------+--------------------+
#: | 20 | 9.4.20 | User Field No. 2 | user_field_2 |
#: +-----+--------------+--------------------------------+--------------------+
#: | 21 | 9.4.21 | Laboratory Field No. 1 | laboratory_field_1 |
#: +-----+--------------+--------------------------------+--------------------+
#: | 22 | 9.4.22 | Laboratory Field No. 2 | laboratory_field_2 |
#: +-----+--------------+--------------------------------+--------------------+
#: | 23 | 9.4.23 | Date/Time Reported | modified_at |
#: +-----+--------------+--------------------------------+--------------------+
#: | 24 | 9.4.24 | Instrument Charge | instrument_charge |
#: +-----+--------------+--------------------------------+--------------------+
#: | 25 | 9.4.25 | Instrument Section ID | instrument_section |
#: +-----+--------------+--------------------------------+--------------------+
#: | 26 | 9.4.26 | Report Type | report_type |
#: +-----+--------------+--------------------------------+--------------------+
#:
# Base order record: only ``type`` and ``seq`` carry values; all other
# fields are placeholders for concrete implementations.
OrderRecord = Record.build(
    ConstantField(name='type', default='O'),
    IntegerField(name='seq', default=1, required=True),
    NotUsedField(name='sample_id'),
    NotUsedField(name='instrument'),
    NotUsedField(name='test'),
    NotUsedField(name='priority'),
    NotUsedField(name='created_at'),
    NotUsedField(name='sampled_at'),
    NotUsedField(name='collected_at'),
    NotUsedField(name='volume'),
    NotUsedField(name='collector'),
    NotUsedField(name='action_code'),
    NotUsedField(name='danger_code'),
    NotUsedField(name='clinical_info'),
    NotUsedField(name='delivered_at'),
    NotUsedField(name='biomaterial'),
    NotUsedField(name='physician'),
    NotUsedField(name='physician_phone'),
    NotUsedField(name='user_field_1'),
    NotUsedField(name='user_field_2'),
    NotUsedField(name='laboratory_field_1'),
    NotUsedField(name='laboratory_field_2'),
    NotUsedField(name='modified_at'),
    NotUsedField(name='instrument_charge'),
    NotUsedField(name='instrument_section'),
    NotUsedField(name='report_type'),
    # NOTE(review): the fields below (positions 27-31) are not listed in
    # the field table above, which documents only fields 1-26.
    NotUsedField(name='reserved'),
    NotUsedField(name='location_ward'),
    NotUsedField(name='infection_flag'),
    NotUsedField(name='specimen_service'),
    NotUsedField(name='laboratory')
)
#: +-----+--------------+--------------------------------+--------------------+
#: | # | ASTM Field # | ASTM Name | Python alias |
#: +=====+==============+================================+====================+
#: | 1 | 10.1.1 | Record Type ID | type |
#: +-----+--------------+--------------------------------+--------------------+
#: | 2 | 10.1.2 | Sequence Number | seq |
#: +-----+--------------+--------------------------------+--------------------+
#: | 3 | 10.1.3 | Universal Test ID | test |
#: +-----+--------------+--------------------------------+--------------------+
#: | 4 | 10.1.4 | Data or Measurement Value | value |
#: +-----+--------------+--------------------------------+--------------------+
#: | 5 | 10.1.5 | Units | units |
#: +-----+--------------+--------------------------------+--------------------+
#: | 6 | 10.1.6 | Reference Ranges | references |
#: +-----+--------------+--------------------------------+--------------------+
#: | 7 | 10.1.7 | Result Abnormal Flags | abnormal_flag |
#: +-----+--------------+--------------------------------+--------------------+
#: | 8 | 10.1.8 | Nature of Abnormal Testing | abnormality_nature |
#: +-----+--------------+--------------------------------+--------------------+
#: | 9 | 10.1.9 | Results Status | status |
#: +-----+--------------+--------------------------------+--------------------+
#: | 10 | 10.1.10 | Date of Change in Instrument | norms_changed_at |
#: | | | Normative Values | |
#: +-----+--------------+--------------------------------+--------------------+
#: | 11 | 10.1.11 | Operator Identification | operator |
#: +-----+--------------+--------------------------------+--------------------+
#: | 12 | 10.1.12 | Date/Time Test Started | started_at |
#: +-----+--------------+--------------------------------+--------------------+
#: | 13 | 10.1.13 | Date/Time Test Complete | completed_at |
#: +-----+--------------+--------------------------------+--------------------+
#: | 14 | 10.1.14 | Instrument Identification | instrument |
#: +-----+--------------+--------------------------------+--------------------+
#:
# Base result record: only ``type`` and ``seq`` carry values; all other
# fields are placeholders for concrete implementations.
ResultRecord = Record.build(
    ConstantField(name='type', default='R'),
    IntegerField(name='seq', default=1, required=True),
    NotUsedField(name='test'),
    NotUsedField(name='value'),
    NotUsedField(name='units'),
    NotUsedField(name='references'),
    NotUsedField(name='abnormal_flag'),
    NotUsedField(name='abnormality_nature'),
    NotUsedField(name='status'),
    NotUsedField(name='norms_changed_at'),
    NotUsedField(name='operator'),
    NotUsedField(name='started_at'),
    NotUsedField(name='completed_at'),
    NotUsedField(name='instrument'),
)
#: +-----+--------------+---------------------------------+-------------------+
#: | # | ASTM Field # | ASTM Name | Python alias |
#: +=====+==============+=================================+===================+
#: | 1 | 11.1.1 | Record Type ID | type |
#: +-----+--------------+---------------------------------+-------------------+
#: | 2 | 11.1.2 | Sequence Number | seq |
#: +-----+--------------+---------------------------------+-------------------+
#: | 3 | 11.1.3 | Comment Source | source |
#: +-----+--------------+---------------------------------+-------------------+
#: | 4 | 11.1.4 | Comment Text | data |
#: +-----+--------------+---------------------------------+-------------------+
#: | 5 | 11.1.5 | Comment Type | ctype |
#: +-----+--------------+---------------------------------+-------------------+
#:
# Base comment record: only ``type`` and ``seq`` carry values; source,
# data and comment type are left to concrete implementations.
CommentRecord = Record.build(
    ConstantField(name='type', default='C'),
    IntegerField(name='seq', default=1, required=True),
    NotUsedField(name='source'),
    NotUsedField(name='data'),
    NotUsedField(name='ctype')
)
#: +-----+--------------+---------------------------------+-------------------+
#: | # | ASTM Field # | ASTM Name | Python alias |
#: +=====+==============+=================================+===================+
#: | 1 | 13.1.1 | Record Type ID | type |
#: +-----+--------------+---------------------------------+-------------------+
#: | 2 | 13.1.2 | Sequence Number | seq |
#: +-----+--------------+---------------------------------+-------------------+
#: | 3 | 13.1.3 | Termination code | code |
#: +-----+--------------+---------------------------------+-------------------+
#:
# Message terminator record; all three fields are constants.
TerminatorRecord = Record.build(
    ConstantField(name='type', default='L'),
    # ``seq`` is the constant 1, but serialised/validated as an integer
    # via the wrapped IntegerField.
    ConstantField(name='seq', default=1, field=IntegerField()),
    ConstantField(name='code', default='N')
)
#: +-----+--------------+---------------------------------+-------------------+
#: | # | ASTM Field # | ASTM Name | Python alias |
#: +=====+==============+=================================+===================+
#: | 1 | 14.1.1 | Record Type ID | type |
#: +-----+--------------+---------------------------------+-------------------+
#: | 2 | 14.1.2 | Sequence Number | seq |
#: +-----+--------------+---------------------------------+-------------------+
#: | 3 | 14.1.3 | Analytical Method | method |
#: +-----+--------------+---------------------------------+-------------------+
#: | 4 | 14.1.4 | Instrumentation | instrument |
#: +-----+--------------+---------------------------------+-------------------+
#: | 5 | 14.1.5 | Reagents | reagents |
#: +-----+--------------+---------------------------------+-------------------+
#: | 6 | 14.1.6 | Units of Measure | units |
#: +-----+--------------+---------------------------------+-------------------+
#: | 7 | 14.1.7 | Quality Control | qc |
#: +-----+--------------+---------------------------------+-------------------+
#: | 8 | 14.1.8 | Specimen Descriptor | biomaterial |
#: +-----+--------------+---------------------------------+-------------------+
#: | 9 | 14.1.9 | Reserved Field | reserved |
#: +-----+--------------+---------------------------------+-------------------+
#: | 10 | 14.1.10 | Container | container |
#: +-----+--------------+---------------------------------+-------------------+
#: | 11 | 14.1.11 | Specimen ID | sample_id |
#: +-----+--------------+---------------------------------+-------------------+
#: | 12 | 14.1.12 | Analyte | analyte |
#: +-----+--------------+---------------------------------+-------------------+
#: | 13 | 14.1.13 | Result | result |
#: +-----+--------------+---------------------------------+-------------------+
#: | 14 | 14.1.14 | Result Units | result_units |
#: +-----+--------------+---------------------------------+-------------------+
#: | 15 | 14.1.15 | Collection Date and Time | sampled_at |
#: +-----+--------------+---------------------------------+-------------------+
#: | 16 | 14.1.16 | Result Date and Time | completed_at |
#: +-----+--------------+---------------------------------+-------------------+
#: | 17 | 14.1.17 | Analytical Preprocessing Steps | preanalytics |
#: +-----+--------------+---------------------------------+-------------------+
#: | 18 | 14.1.18 | Patient Diagnosis | diagnosis |
#: +-----+--------------+---------------------------------+-------------------+
#: | 19 | 14.1.19 | Patient Birthdate | birthdate |
#: +-----+--------------+---------------------------------+-------------------+
#: | 20 | 14.1.20 | Patient Sex | sex |
#: +-----+--------------+---------------------------------+-------------------+
#: | 21 | 14.1.21 | Patient Race | race |
#: +-----+--------------+---------------------------------+-------------------+
#:
# Base scientific record: only ``type`` and ``seq`` carry values; all
# other fields are placeholders for concrete implementations.
ScientificRecord = Record.build(
    ConstantField(name='type', default='S'),
    IntegerField(name='seq', default=1, required=True),
    NotUsedField(name='method'),
    NotUsedField(name='instrument'),
    NotUsedField(name='reagents'),
    NotUsedField(name='units'),
    NotUsedField(name='qc'),
    NotUsedField(name='biomaterial'),
    NotUsedField(name='reserved'),
    NotUsedField(name='container'),
    NotUsedField(name='sample_id'),
    NotUsedField(name='analyte'),
    NotUsedField(name='result'),
    NotUsedField(name='result_units'),
    NotUsedField(name='sampled_at'),
    NotUsedField(name='completed_at'),
    NotUsedField(name='preanalytics'),
    NotUsedField(name='diagnosis'),
    NotUsedField(name='birthdate'),
    NotUsedField(name='sex'),
    NotUsedField(name='race'),
)
#: +-----+--------------+---------------------------------+-------------------+
#: | # | ASTM Field # | ASTM Name | Python alias |
#: +=====+==============+=================================+===================+
#: | 1 | 15.1.1 | Record Type ID | type |
#: +-----+--------------+---------------------------------+-------------------+
#: | 2 | 15.1.2 | Sequence Number | seq |
#: +-----+--------------+---------------------------------+-------------------+
#:
#: .. note::
#: This record, which is similar to the comment record, may be used to send
#: complex structures where use of the existing record types would not be
#: appropriate. The fields within this record type are defined by the
#: manufacturer.
#:
# Manufacturer information record; all fields beyond ``type`` and ``seq``
# are manufacturer-defined (see the note above), so none are declared.
ManufacturerInfoRecord = Record.build(
    ConstantField(name='type', default='M'),
    IntegerField(name='seq', default=1, required=True),
)
| [
[
8,
0,
0.0303,
0.018,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.0449,
0.0022,
0,
0.66,
0.0909,
426,
0,
1,
0,
0,
426,
0,
0
],
[
1,
0,
0.0517,
0.0112,
0,
0.66,... | [
"\"\"\"Common ASTM records structure.\n\n\nThis module contains base ASTM records mappings with only defined common\nrequired fields for most implementations. Others are marked as\n:class:`~astm.mapping.NotUsedField` and should be defined explicitly for your\nASTM realisation.\n\"\"\"",
"from datetime import date... |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Alexander Shorin
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
#: Base text encoding prescribed by the ASTM specification.
ENCODING = 'latin-1'
#: ``<STX>`` -- start of a message frame.
STX = b'\x02'
#: ``<ETX>`` -- end of a message frame.
ETX = b'\x03'
#: ``<EOT>`` -- terminates an ASTM session.
EOT = b'\x04'
#: ``<ENQ>`` -- initiates an ASTM session.
ENQ = b'\x05'
#: ``<ACK>`` -- command accepted.
ACK = b'\x06'
#: ``<NAK>`` -- command rejected.
NAK = b'\x15'
#: ``<ETB>`` -- end of an intermediate message chunk.
ETB = b'\x17'
#: Line feed.
LF = b'\n'
#: Carriage return.
CR = b'\r'
#: Carriage return followed by line feed.
CRLF = CR + LF
#: Delimiter between message records (carriage return).
RECORD_SEP = b'\r'
#: Delimiter between record fields (pipe).
FIELD_SEP = b'|'
#: Delimiter between repeated fields (backslash).
REPEAT_SEP = b'\\'
#: Delimiter between field components (caret).
COMPONENT_SEP = b'^'
#: Escape delimiter (ampersand).
ESCAPE_SEP = b'&'
| [
[
14,
0,
0.2683,
0.0244,
0,
0.66,
0,
1,
1,
0,
0,
0,
0,
3,
0
],
[
14,
0,
0.3415,
0.0244,
0,
0.66,
0.0667,
693,
1,
0,
0,
0,
0,
0,
0
],
[
14,
0,
0.3902,
0.0244,
0,
0.6... | [
"ENCODING = 'latin-1'",
"STX = b'\\x02'",
"ETX = b'\\x03'",
"EOT = b'\\x04'",
"ENQ = b'\\x05'",
"ACK = b'\\x06'",
"NAK = b'\\x15'",
"ETB = b'\\x17'",
"LF = b'\\x0A'",
"CR = b'\\x0D'",
"CRLF = CR + LF",
"RECORD_SEP = b'\\x0D' # \\r #",
"FIELD_SEP = b'\\x7C' # | #",
"REPEAT_SEP ... |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Alexander Shorin
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
"""
``astm.omnilab.server`` - LabOnline server implementation
----------------------------------------------------------
"""
from astm.server import BaseRecordsDispatcher
from astm.mapping import (
Component, ConstantField, ComponentField, DateTimeField, IntegerField,
SetField, TextField, NotUsedField, DateField
)
from .common import (
Header, Terminator, CommonPatient as Patient,
CommonOrder,
CommonResult,
CommonComment,
Sender
)
__all__ = ['RecordsDispatcher',
'Header', 'Patient', 'Order', 'Result', 'Terminator',
'CommentData', 'CompletionDate', 'Instrument', 'Operator',
'Sender', 'Test']
#: Instrument (analyser) information structure.
#:
#: :param _: Reserved. Not used.
#: :type _: None
#:
#: :param rack: Rack number. Length: 5.
#: :type rack: str
#:
#: :param position: Position number. Length: 3.
#: :type position: str
#:
# NOTE(review): this binding is immediately shadowed by a second
# ``Instrument = Component.build(...)`` later in this module (which
# declares ``_`` as NotUsedField, matching the ":param _: Reserved. Not
# used." doc above, while this one uses TextField).  This first
# definition is effectively dead code.
Instrument = Component.build(
    TextField(name='_'),
    TextField(name='rack', length=5),
    TextField(name='position', length=3),
)
#: Test :class:`~astm.mapping.Component` also known as Universal Test ID.
#:
#: :param _: Reserved. Not used.
#: :type _: None
#:
#: :param __: Reserved. Not used.
#: :type __: None
#:
#: :param ___: Reserved. Not used.
#: :type ___: None
#:
#: :param assay_code: Assay code. Required. Length: 20.
#: :type assay_code: str
#:
#: :param assay_name: Assay name. Length: 8.
#: :type assay_name: str
#:
#: :param dilution: Dilution. Length: 10.
#: :type dilution: str
#:
#: :param status: Assay status. Length: 1.
#: :type status: str
#:
#: :param reagent_lot: Reagent lot. Length: 15.
#: :type reagent_lot: str
#:
#: :param reagent_number: Reagent serial number. Length: 5.
#: :type reagent_number: str
#:
#: :param control_lot: Control lot number. Length: 25.
#: :type control_lot: str
#:
#: :param type: Result type value. One of: ``CE``, ``TX``.
#: :type type: str
#:
# Universal Test ID component.  The first three positions are unused
# placeholders; ``length`` presumably caps the serialised field width --
# TODO confirm in astm.mapping.TextField.
Test = Component.build(
    NotUsedField(name='_'),
    NotUsedField(name='__'),
    NotUsedField(name='___'),
    TextField(name='assay_code', required=True, length=20),
    TextField(name='assay_name', length=8),
    TextField(name='dilution', length=10),
    TextField(name='status', length=1),
    TextField(name='reagent_lot', length=15),
    TextField(name='reagent_number', length=5),
    TextField(name='control_lot', length=25),
    # Result type: 'CE' or 'TX' only.
    SetField(name='type', values=('CE', 'TX'))
)
#: Information about operator that validated results.
#:
#: :param code_on_labonline: Operator code on LabOnline. Length: 12.
#: :type code_on_labonline: str
#:
#: :param code_on_analyzer: Operator code on analyser. Length: 20.
#: :type code_on_analyzer: str
#:
# Operator identification pair: the LabOnline-side code and the
# analyser-side code (see the field docs above).
Operator = Component.build(
    TextField(name='code_on_labonline', length=12),
    TextField(name='code_on_analyzer', length=20),
)
#: Completion date time information.
#:
#: :param labonline: Completion date time on LabOnline.
#: :type labonline: datetime.datetime
#:
#: :param analyzer: Completion date time on analyser.
#: :type analyzer: datetime.datetime
#:
# Completion timestamp pair: LabOnline-side and analyser-side values.
CompletionDate = Component.build(
    DateTimeField(name='labonline'),
    DateTimeField(name='analyzer'),
)
#: Instrument (analyser) information structure.
#:
#: :param _: Reserved. Not used.
#: :type _: None
#:
#: :param rack: Rack number. Length: 5.
#: :type rack: str
#:
#: :param position: Position number. Length: 3.
#: :type position: str
#:
# NOTE(review): this re-binds ``Instrument`` (an earlier definition of
# the same name exists above with ``_`` as TextField).  Being the later
# assignment, this is the definition actually in effect module-wide.
Instrument = Component.build(
    NotUsedField(name='_'),
    TextField(name='rack', length=5),
    TextField(name='position', length=3),
)
#: Comment control text structure.
#:
# Comment payload: a two-letter control code followed by a value and
# five free-form fields.  The exact semantics of each code are not
# visible here -- confirm against the LabOnline documentation.
CommentData = Component.build(
    SetField(name='code', values=('PC', 'RC', 'SC', 'TC',
            'CK', 'SE', 'CL', 'TA', 'SS', 'HQ', 'AL', 'PT')),
    TextField(name='value'),
    TextField(name='field_1'),
    TextField(name='field_2'),
    TextField(name='field_3'),
    TextField(name='field_4'),
    TextField(name='field_5'),
)
class Order(CommonOrder):
    """ASTM order record.

    :param type: Record Type ID. Always ``O``.
    :type type: str

    :param seq: Sequence Number. Required.
    :type seq: int

    :param sample_id: Sample ID number. Required. Length: 12.
    :type sample_id: str

    :param instrument: Instrument specimen ID.
    :type instrument: :class:`Instrument`

    :param test: Test information structure (aka Universal Test ID).
    :type test: :class:`Test`

    :param priority: Priority flag. Required. Possible values:

                     - ``S``: stat;
                     - ``R``: routine.
    :type priority: str

    :param created_at: Ordered date and time. Required.
    :type created_at: datetime.datetime

    :param sampled_at: Specimen collection date and time.
    :type sampled_at: datetime.datetime

    :param collected_at: Collection end time. Not used.
    :type collected_at: None

    :param volume: Collection volume. Not used.
    :type volume: None

    :param collector: Collector ID. Not used.
    :type collector: None

    :param action_code: Action code. Required. Possible values:

                        - :const:`None`: normal order result;
                        - ``Q``: quality control.
    :type action_code: str

    :param danger_code: Danger code. Not used.
    :type danger_code: None

    :param clinical_info: Relevant clinical info. Not used.
    :type clinical_info: None

    :param delivered_at: Date/time specimen received.
    :type delivered_at: None

    :param biomaterial: Sample material code. Length: 20.
    :type biomaterial: str

    :param physician: Ordering Physician. Not used.
    :type physician: None

    :param physician_phone: Physician's phone number. Not used.
    :type physician_phone: None

    :param user_field_1: An optional field, it will be send back unchanged to
                         the host along with the result. Length: 20.
    :type user_field_1: str

    :param user_field_2: An optional field, it will be send back unchanged to
                         the host along with the result. Length: 1024.
    :type user_field_2: str

    :param laboratory_field_1: Laboratory field #1. Not used.
    :type laboratory_field_1: None

    :param laboratory_field_2: Primary tube code. Length: 12.
    :type laboratory_field_2: str

    :param modified_at: Date and time of last result modification. Not used.
    :type modified_at: None

    :param instrument_charge: Instrument charge to computer system. Not used.
    :type instrument_charge: None

    :param instrument_section: Instrument section id. Not used.
    :type instrument_section: None

    :param report_type: Report type. Always ``F`` which means final order
                        request.
    :type report_type: str

    :param reserved: Reserved. Not used.
    :type reserved: None

    :param location_ward: Location ward of specimen collection. Not used.
    :type location_ward: None

    :param infection_flag: Nosocomial infection flag. Not used.
    :type infection_flag: None

    :param specimen_service: Specimen service. Not used.
    :type specimen_service: None

    :param laboratory: Production laboratory. Not used.
    :type laboratory: None
    """
    # Field overrides narrowing the generic CommonOrder mapping.
    action_code = SetField(values=(None, 'Q'))  # None: normal, 'Q': QC
    instrument = ComponentField(Instrument)
    report_type = ConstantField(default='F')    # always a final order request
    test = ComponentField(Test)
class Result(CommonResult):
    """ASTM result record.

    :param type: Record Type ID. Always ``R``.
    :type type: str

    :param seq: Sequence Number. Required.
    :type seq: int

    :param test: Test information structure (aka Universal Test ID).
    :type test: :class:`Test`

    :param value: Measurement value. Numeric, coded or free text value
                  depending on result type. Required. Length: 1024.
    :type value: None

    :param units: Units. Length: 20.
    :type units: str

    :param references: Normal reference value interval.
    :type references: str

    :param abnormal_flag: Result abnormal flag. Possible values:

                          - ``0``: normal result;
                          - ``1``: result out of normal values;
                          - ``2``: result out of attention values;
                          - ``3``: result out of panic values;

                          plus ``10`` for delta-check and ``1000`` for a
                          device alarm. Length: 4.
    :type abnormal_flag: str

    :param abnormality_nature: Nature of abnormality testing. Possible values:

                               - ``N``: normal value;
                               - ``L``: below low normal range;
                               - ``H``: above high normal range;
                               - ``LL``: below low critical range;
                               - ``HH``: above high critical range.
    :type abnormality_nature: str

    :param status: Result status. ``F`` indicates a final result;
                   ``R`` indicating rerun. Length: 1.
    :type status: str

    :param normatives_changed_at: Date of changes in instrument normative
                                  values or units. Not used.
    :type normatives_changed_at: None

    :param operator: Operator ID.
    :type operator: :class:`Operator`

    :param started_at: When works on test was started on.
    :type started_at: datetime.datetime

    :param completed_at: When works on test was done.
    :type completed_at: :class:`CompletionDate`

    :param instrument: Instrument ID. Required. Length: 16.
    :type instrument: str
    """
    # Field overrides narrowing the generic CommonResult mapping.
    # ``values`` enumerates the documented flag combinations: base 0-3,
    # +10 for delta-check, +1000 for device alarm.
    abnormal_flag = SetField(
        field=IntegerField(),
        length=4,
        values=(0, 1, 2, 3,
                10, 11, 12, 13,
                1000, 1001, 1002, 1003,
                1010, 1011, 1012, 1013))
    abnormality_nature = SetField(values=('N', 'L', 'H', 'LL', 'HH'))
    completed_at = ComponentField(CompletionDate)
    # NOTE(review): created_at/sampled_at are declared here but not
    # described in the docstring above -- presumably the order/collection
    # dates echoed back with the result; confirm against CommonResult.
    created_at = DateField()
    instrument = TextField(length=16)
    operator = ComponentField(Operator)
    references = TextField()
    sampled_at = DateField()
    started_at = DateTimeField(required=True)
    status = SetField(values=('F', 'R'))
    test = ComponentField(Test)
    units = TextField(length=20)
class Comment(CommonComment):
    """ASTM comment record (server side).

    :param type: Record Type ID. Always ``C``.
    :type type: str

    :param seq: Sequence Number. Required.
    :type seq: int

    :param source: Comment source. Always ``I``.
    :type source: str

    :param data: Comment data component. Required.
    :type data: :class:`CommentData`

    :param ctype: Comment type. Always ``G``.
    :type ctype: str
    """
    source = ConstantField(default='I')
    data = ComponentField(CommentData)
class RecordsDispatcher(BaseRecordsDispatcher):
    """Omnilab-specific records dispatcher.

    Replaces the base wrapper table so incoming records are automatically
    wrapped by the related Omnilab server mappings.
    """
    def __init__(self, *args, **kwargs):
        super(RecordsDispatcher, self).__init__(*args, **kwargs)
        # Map each ASTM record type identifier to its mapping class;
        # assignment (not update) intentionally discards base defaults.
        record_mappings = (
            ('H', Header),
            ('P', Patient),
            ('O', Order),
            ('R', Result),
            ('C', Comment),
            ('L', Terminator),
        )
        self.wrappers = dict(record_mappings)
| [
[
8,
0,
0.0325,
0.0156,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.0442,
0.0026,
0,
0.66,
0.0714,
244,
0,
1,
0,
0,
244,
0,
0
],
[
1,
0,
0.0506,
0.0104,
0,
0.66... | [
"\"\"\"\n\n``astm.omnilab.server`` - LabOnline server implementation\n----------------------------------------------------------\n\n\"\"\"",
"from astm.server import BaseRecordsDispatcher",
"from astm.mapping import (\n Component, ConstantField, ComponentField, DateTimeField, IntegerField,\n SetField, Tex... |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Alexander Shorin
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
"""
``astm.omnilab.client`` - LabOnline client implementation
----------------------------------------------------------
"""
from astm.mapping import (
Component, ConstantField, ComponentField, IntegerField, DateTimeField,
RepeatedComponentField, SetField, TextField, NotUsedField
)
from .common import (
Header, Terminator,
CommonPatient,
CommonOrder,
CommonResult,
CommonComment,
Sender
)
__all__ = ['Header', 'Patient', 'Order', 'Result', 'Comment', 'Terminator',
'CommentData', 'PatientAge', 'Sender', 'Test']
#: Patient age structure.
#:
#: :param value: Age value.
#: :type value: int
#:
#: :param unit: Age unit. One of: ``years``, ``months``, ``days``.
#: :type unit: str
#:
PatientAge = Component.build(
IntegerField(name='value'),
SetField(name='unit', values=('years', 'months', 'days'))
)
#: Test :class:`~astm.mapping.Component` also known as Universal Test ID.
#:
#: :param _: Reserved. Not used.
#: :type _: None
#:
#: :param __: Reserved. Not used.
#: :type __: None
#:
#: :param ___: Reserved. Not used.
#: :type ___: None
#:
#: :param assay_code: Assay code. Required. Length: 20.
#: :type assay_code: str
#:
#: :param assay_name: Assay name. Length: 8.
#: :type assay_name: str
#:
Test = Component.build(
NotUsedField(name='_'),
NotUsedField(name='__'),
NotUsedField(name='___'),
TextField(name='assay_code', required=True, length=20),
TextField(name='assay_name', length=8),
)
#: Comment control data structure.
#:
CommentData = Component.build(
SetField(name='code', values=('PC', 'RC', 'SC', 'TC')),
TextField(name='value')
)
class Patient(CommonPatient):
    """ASTM patient record (client side).

    Shared patient fields are inherited from :class:`CommonPatient`; this
    mapping specialises two slots that the base class leaves unused:

    :param physician_id: Attending physician, free text. Length: 35.
    :type physician_id: str

    :param special_1: Patient age (value + unit component), carried in the
        "Special Field #1" slot.
    :type special_1: :class:`PatientAge`
    """
    physician_id = TextField(length=35)
    special_1 = ComponentField(PatientAge)
class Order(CommonOrder):
    """ASTM order record (client side).

    Shared order fields come from :class:`CommonOrder`; the fields below
    are specialised for this mapping.

    :param test: Test information structure(s) (aka Universal Test ID).
        Repeated component, so a single order may request several tests.
    :type test: list of :class:`Test`

    :param created_at: Ordered date and time. Required.
    :type created_at: datetime.datetime

    :param sampled_at: Specimen collection date and time.
    :type sampled_at: datetime.datetime

    :param action_code: Action code. Defaults to ``N``. Possible values:

        - ``C``: cancel works for specified tests;
        - ``A``: add tests to existing specimen;
        - ``N``: create new order;
        - ``R``: rerun tests for specified order.
    :type action_code: str

    :param laboratory_field_1: In a multi-laboratory environment,
        indicates which laboratory entered the order. Length: 20.
    :type laboratory_field_1: str

    :param report_type: Report type. Always ``O`` (normal order request).
    :type report_type: str

    :param laboratory: Production laboratory: the laboratory expected to
        process the order. Length: 20.
    :type laboratory: str
    """
    action_code = SetField(default='N', values=('C', 'A', 'N', 'R'))
    created_at = DateTimeField(required=True)
    laboratory = TextField(length=20)
    laboratory_field_1 = TextField(length=20)
    report_type = ConstantField(default='O')
    sampled_at = DateTimeField()
    test = RepeatedComponentField(Test)
class Result(CommonResult):
    """ASTM result record (client side).

    Shared result fields come from :class:`CommonResult`; only ``test``
    is specialised here.

    :param type: Record Type ID. Always ``R``.
    :type type: str

    :param seq: Sequence Number. Required.
    :type seq: int

    :param test: Test information structure (aka Universal Test ID).
    :type test: :class:`Test`

    :param value: Measurement value. Required. Length: 20
        (defined in :class:`CommonResult`).
    :type value: str

    :param completed_at: When work on the test was done. Required.
    :type completed_at: datetime.datetime
    """
    test = ComponentField(Test)
class Comment(CommonComment):
    """ASTM comment record (client side).

    :param type: Record Type ID. Always ``C``.
    :type type: str

    :param seq: Sequence Number. Required.
    :type seq: int

    :param source: Comment source. Always ``L``.
    :type source: str

    :param data: Comment data component: a code (``PC``, ``RC``, ``SC``
        or ``TC``) plus a free-text value.
    :type data: :class:`CommentData`

    :param ctype: Comment type. Always ``G``.
    :type ctype: str
    """
    source = ConstantField(default='L')
    data = ComponentField(CommentData)
| [
[
8,
0,
0.0328,
0.0157,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.0486,
0.0105,
0,
0.66,
0.1,
791,
0,
9,
0,
0,
791,
0,
0
],
[
1,
0,
0.0643,
0.021,
0,
0.66,
... | [
"\"\"\"\n\n``astm.omnilab.client`` - LabOnline client implementation\n----------------------------------------------------------\n\n\"\"\"",
"from astm.mapping import (\n Component, ConstantField, ComponentField, IntegerField, DateTimeField,\n RepeatedComponentField, SetField, TextField, NotUsedField\n)",
... |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Alexander Shorin
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
from . import client
from . import server
| [
[
1,
0,
0.9091,
0.0909,
0,
0.66,
0,
0,
0,
1,
0,
0,
0,
0,
0
],
[
1,
0,
1,
0.0909,
0,
0.66,
1,
0,
0,
1,
0,
0,
0,
0,
0
]
] | [
"from . import client",
"from . import server"
] |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Alexander Shorin
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
from astm import __version__
from astm.mapping import (
Component, ConstantField, ComponentField, DateField, DateTimeField,
IntegerField, SetField, TextField
)
from astm.records import (
HeaderRecord, PatientRecord, OrderRecord, ResultRecord, CommentRecord,
TerminatorRecord
)
#: Information about sender.
#:
#: :param name: Name.
#: :type name: str
#:
#: :param version: Sender software version.
#: :type version: str
#:
Sender = Component.build(
TextField(name='name', default='python-astm'),
TextField(name='version', default=__version__)
)
#: Patient name structure.
#:
#: :param last: Last name. Length: 50.
#: :type last: str
#:
#: :param first: First name. Length: 50.
#: :type first: str
#:
PatientName = Component.build(
TextField(name='last', length=50),
TextField(name='first', length=50)
)
class Header(HeaderRecord):
    r"""ASTM header record.

    Raw docstring: ``\^`` is not a valid Python escape sequence.

    :param type: Record Type ID. Always ``H``.
    :type type: str

    :param delimeter: Delimiter Definition. Always ``\^&``.
    :type delimeter: str

    :param sender: Information about sender. Optional.
    :type sender: :class:`Sender`

    :param processing_id: Processing ID. Always ``P``.
    :type processing_id: str

    :param version: ASTM Version Number. Always ``E 1394-97``.
    :type version: str

    :param timestamp: Date and Time of Message.
    :type timestamp: datetime.datetime

    The remaining slots (``message_id``, ``password``, ``address``,
    ``reserved``, ``phone``, ``chars``, ``receiver``, ``comments``)
    are not used.
    """
    sender = ComponentField(Sender)
    processing_id = ConstantField(default='P')
    version = ConstantField(default='E 1394-97')
class CommonPatient(PatientRecord):
    """Patient record fields shared by the client and server mappings.

    :param practice_id: Practice Assigned Patient ID. Required. Length: 12.
    :type practice_id: str

    :param laboratory_id: Laboratory Assigned Patient ID. Required.
        Length: 16.
    :type laboratory_id: str

    :param name: Patient name.
    :type name: :class:`PatientName`

    :param birthdate: Birthdate.
    :type birthdate: datetime.date

    :param sex: Patient Sex. One of: ``M`` (male), ``F`` (female),
        ``I`` (animal); ``None`` means unknown.
    :type sex: str

    :param special_2: Patient source: ``0`` internal patient,
        ``1`` external patient.
    :type special_2: int

    :param location: Patient location. Length: 20.
    :type location: str

    All other slots inherited from :class:`PatientRecord` (race, address,
    phone, physician, diagnosis, medications, admission data, etc.) are
    not used by these mappings.
    """
    birthdate = DateField()
    laboratory_id = TextField(required=True, length=16)
    location = TextField(length=20)
    name = ComponentField(PatientName)
    practice_id = TextField(required=True, length=12)
    sex = SetField(values=('M', 'F', None, 'I'))
    special_2 = SetField(values=(0, 1), field=IntegerField())
class CommonOrder(OrderRecord):
    """Order record fields shared by the client and server mappings.

    :param sample_id: Sample ID number. Required. Length: 12.
    :type sample_id: str

    :param priority: Priority flag: ``S`` (stat, the default) or
        ``R`` (routine).
    :type priority: str

    :param biomaterial: Sample material code. Length: 20.
    :type biomaterial: str

    :param laboratory_field_2: Primary tube code. Length: 12.
    :type laboratory_field_2: str

    :param user_field_1: Optional field, sent back unchanged with the
        result. Length: 20.
    :type user_field_1: str

    :param user_field_2: Optional field, sent back unchanged with the
        result. Length: 1024.
    :type user_field_2: str
    """
    biomaterial = TextField(length=20)
    laboratory_field_2 = TextField(length=12)
    priority = SetField(default='S', values=('S', 'R'))
    sample_id = TextField(required=True, length=12)
    user_field_1 = TextField(length=20)
    user_field_2 = TextField(length=1024)
class CommonResult(ResultRecord):
    """Result record fields shared by the client and server mappings.

    :param value: Measurement value. Required. Length: 20.
    :type value: str

    :param completed_at: When work on the test was done. Required.
    :type completed_at: datetime.datetime
    """
    completed_at = DateTimeField(required=True)
    value = TextField(required=True, length=20)
class CommonComment(CommentRecord):
    """Comment record base shared by the client and server mappings.

    :param ctype: Comment type. Always ``G``.
    :type ctype: str
    """
    ctype = ConstantField(default='G')
class Terminator(TerminatorRecord):
    """ASTM terminator record; closes a message.

    :param type: Record Type ID. Always ``L``.
    :type type: str

    :param seq: Sequential number. Always ``1``.
    :type seq: int

    :param code: Termination code. Always ``N``.
    :type code: str
    """
| [
[
1,
0,
0.0403,
0.004,
0,
0.66,
0,
367,
0,
1,
0,
0,
367,
0,
0
],
[
1,
0,
0.0504,
0.0161,
0,
0.66,
0.1,
791,
0,
8,
0,
0,
791,
0,
0
],
[
1,
0,
0.0665,
0.0161,
0,
0.66... | [
"from astm import __version__",
"from astm.mapping import (\n Component, ConstantField, ComponentField, DateField, DateTimeField,\n IntegerField, SetField, TextField\n)",
"from astm.records import (\n HeaderRecord, PatientRecord, OrderRecord, ResultRecord, CommentRecord,\n TerminatorRecord\n)",
"S... |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012 Alexander Shorin
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
from .version import __version__, __version_info__
from .exceptions import BaseASTMError, NotAccepted, InvalidState
from .codec import (
decode, decode_message, decode_record,
encode, encode_message, encode_record,
make_checksum
)
from .mapping import Record, Component
from .records import (
HeaderRecord, PatientRecord, OrderRecord,
ResultRecord, CommentRecord, TerminatorRecord
)
from .protocol import ASTMProtocol
from .client import Client
from .server import RequestHandler, Server
import logging
# Library logger: getLogger() with no name returns the *root* logger.
log = logging.getLogger()
class NullHandler(logging.Handler):
    """Handler that silently discards every record.

    Attached below so importing the package never triggers logging's
    "no handlers could be found" warning when the host application has
    not configured logging.  NOTE(review): Python >= 2.7 ships an
    equivalent ``logging.NullHandler``; this copy presumably keeps
    older interpreters working -- confirm before replacing it.
    """
    def emit(self, *args, **kwargs):
        # Intentionally drop the record.
        pass
log.addHandler(NullHandler())
| [
[
1,
0,
0.303,
0.0303,
0,
0.66,
0,
623,
0,
2,
0,
0,
623,
0,
0
],
[
1,
0,
0.3333,
0.0303,
0,
0.66,
0.0909,
63,
0,
3,
0,
0,
63,
0,
0
],
[
1,
0,
0.4242,
0.1515,
0,
0.6... | [
"from .version import __version__, __version_info__",
"from .exceptions import BaseASTMError, NotAccepted, InvalidState",
"from .codec import (\n decode, decode_message, decode_record,\n encode, encode_message, encode_record,\n make_checksum\n)",
"from .mapping import Record, Component",
"from .rec... |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Alexander Shorin
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
import sys

#: Dotted "major.minor" of the running interpreter; kept because other
#: modules may import this name.
version = '.'.join(map(str, sys.version_info[:2]))

# Branch on the version *tuple*, not on the string form: lexicographic
# string comparison misorders two-digit minors (e.g. '3.9' > '3.10').
if sys.version_info[0] >= 3:
    basestring = (str, bytes)
    unicode = str
    bytes = bytes
    long = int
    def buffer(obj, start=None, stop=None):
        """Python-3 stand-in for the py2 ``buffer`` builtin.

        ``memoryview(obj)`` only validates that *obj* supports the buffer
        protocol; the returned slice is a plain copy of ``obj[start:stop]``.
        """
        memoryview(obj)
        if start is None:
            start = 0
        if stop is None:
            stop = len(obj)
        return obj[start:stop]
else:
    basestring = basestring
    unicode = unicode
    b = bytes = str
    long = long
    buffer = buffer

# Coercion helpers.  NOTE: the ``and``/``or`` idiom means an *empty*
# string falls through unconverted (b('') == ''), preserved as-is for
# backward compatibility.
b = lambda s: isinstance(s, unicode) and s.encode('latin1') or s
u = lambda s: isinstance(s, bytes) and s.decode('utf-8') or s
| [
[
1,
0,
0.2857,
0.0286,
0,
0.66,
0,
509,
0,
1,
0,
0,
509,
0,
0
],
[
14,
0,
0.3429,
0.0286,
0,
0.66,
0.25,
623,
3,
1,
0,
0,
933,
10,
2
],
[
4,
0,
0.6571,
0.5429,
0,
... | [
"import sys",
"version = '.'.join(map(str, sys.version_info[:2]))",
"if version >= '3.0':\n basestring = (str, bytes)\n unicode = str\n bytes = bytes\n long = int\n def buffer(obj, start=None, stop=None):\n memoryview(obj)\n if start == None:",
" basestring = (str, bytes)",
"... |
#====================================================================
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# ====================================================================
#
# This software consists of voluntary contributions made by many
# individuals on behalf of the Apache Software Foundation. For more
# information on the Apache Software Foundation, please see
# <http://www.apache.org/>.
#
import os
import re
import tempfile
import shutil
ignore_pattern = re.compile('^(.svn|target|bin|classes)')
java_pattern = re.compile('^.*\.java')
annot_pattern = re.compile('import org\.apache\.http\.annotation\.')
def process_dir(dir):
    """Recursively visit *dir*, rewriting every ``*.java`` file found.

    Directories matching ``ignore_pattern`` (VCS/build output) are pruned.
    """
    for entry in os.listdir(dir):
        path = os.path.join(dir, entry)
        if os.path.isdir(path):
            if not ignore_pattern.match(entry):
                process_dir(path)
        elif java_pattern.match(entry):
            process_source(path)
def process_source(filename):
    """Rewrite ``org.apache.http.annotation`` imports in one Java file.

    Copies *filename* line by line into a temp file, replacing matching
    import statements with ``net.jcip.annotations``; the temp file then
    atomically replaces the original only if something changed.
    """
    tmpfd, tmpfile = tempfile.mkstemp()
    try:
        changed = False
        # os.fdopen takes ownership of tmpfd; both files are closed by
        # the context managers before the move/remove below.
        with os.fdopen(tmpfd, 'w') as dst:
            with open(filename) as src:
                for line in src:
                    if annot_pattern.match(line):
                        changed = True
                        line = line.replace(
                            'import org.apache.http.annotation.',
                            'import net.jcip.annotations.')
                    dst.write(line)
        if changed:
            shutil.move(tmpfile, filename)
        else:
            os.remove(tmpfile)
    except BaseException:
        # Clean up the temp file but *propagate* the error: the original
        # bare "except:" silently swallowed every failure (including
        # KeyboardInterrupt), hiding broken rewrites.
        if os.path.exists(tmpfile):
            os.remove(tmpfile)
        raise
process_dir('.')
| [
[
1,
0,
0.3514,
0.0135,
0,
0.66,
0,
688,
0,
1,
0,
0,
688,
0,
0
],
[
1,
0,
0.3649,
0.0135,
0,
0.66,
0.1111,
540,
0,
1,
0,
0,
540,
0,
0
],
[
1,
0,
0.3784,
0.0135,
0,
... | [
"import os",
"import re",
"import tempfile",
"import shutil",
"ignore_pattern = re.compile('^(.svn|target|bin|classes)')",
"java_pattern = re.compile('^.*\\.java')",
"annot_pattern = re.compile('import org\\.apache\\.http\\.annotation\\.')",
"def process_dir(dir):\n files = os.listdir(dir)\n for... |
#!/usr/bin/env python
from django.core.management import execute_manager
try:
import settings # Assumed to be in the same directory.
except ImportError:
import sys
sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n(If the file settings.py does indeed exist, it's causing an ImportError somehow.)\n" % __file__)
sys.exit(1)
if __name__ == "__main__":
execute_manager(settings)
| [
[
1,
0,
0.1818,
0.0909,
0,
0.66,
0,
879,
0,
1,
0,
0,
879,
0,
0
],
[
7,
0,
0.5,
0.5455,
0,
0.66,
0.5,
0,
0,
1,
0,
0,
0,
0,
2
],
[
1,
1,
0.3636,
0.0909,
1,
0.98,
... | [
"from django.core.management import execute_manager",
"try:\n import settings # Assumed to be in the same directory.\nexcept ImportError:\n import sys\n sys.stderr.write(\"Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\\nYou'll have to run dj... |
import os
import sys
import logging
os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'
sys.path.insert(0, os.path.abspath(os.path.dirname(__file__)))
# Google App Engine imports.
from google.appengine.ext.webapp import util
# Force Django to reload its settings.
from django.conf import settings
settings._target = None
import django.core.handlers.wsgi
import django.core.signals
import django.db
import django.dispatch.dispatcher
logging.getLogger().setLevel(logging.DEBUG)
django.dispatch.dispatcher.disconnect(
django.db._rollback_on_exception,
django.core.signals.got_request_exception)
def main():
application = django.core.handlers.wsgi.WSGIHandler()
util.run_wsgi_app(application)
if __name__ == '__main__':
main()
| [
[
1,
0,
0.0323,
0.0323,
0,
0.66,
0,
688,
0,
1,
0,
0,
688,
0,
0
],
[
1,
0,
0.0645,
0.0323,
0,
0.66,
0.0667,
509,
0,
1,
0,
0,
509,
0,
0
],
[
1,
0,
0.0968,
0.0323,
0,
... | [
"import os",
"import sys",
"import logging",
"os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'",
"sys.path.insert(0, os.path.abspath(os.path.dirname(__file__)))",
"from google.appengine.ext.webapp import util",
"from django.conf import settings",
"settings._target = None",
"import django.core.handl... |
#!/usr/bin/env python
from django.core.management import execute_manager
try:
import settings # Assumed to be in the same directory.
except ImportError:
import sys
sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n(If the file settings.py does indeed exist, it's causing an ImportError somehow.)\n" % __file__)
sys.exit(1)
if __name__ == "__main__":
execute_manager(settings)
| [
[
1,
0,
0.1818,
0.0909,
0,
0.66,
0,
879,
0,
1,
0,
0,
879,
0,
0
],
[
7,
0,
0.5,
0.5455,
0,
0.66,
0.5,
0,
0,
1,
0,
0,
0,
0,
2
],
[
1,
1,
0.3636,
0.0909,
1,
0.54,
... | [
"from django.core.management import execute_manager",
"try:\n import settings # Assumed to be in the same directory.\nexcept ImportError:\n import sys\n sys.stderr.write(\"Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\\nYou'll have to run dj... |
from django.conf.urls.defaults import *
urlpatterns = patterns('',
# Example:
# (r'^16x16/', include('16x16.foo.urls')),
# Uncomment this for admin:
(r'^send/', 'favicon.views.receiver'),
(r'^contrib/', 'favicon.views.contrib'),
(r'^toggler/', 'favicon.views.toggler'),
(r'^update/', 'favicon.views.update'),
(r'^top/', 'favicon.views.top_x'),
(r'^api/(?P<year>\d{4})/(?P<month>\d{2})/(?P<day>\d{2})/(?P<hour>\d{2})/(?P<min>\d{2})/(?P<sec>\d{2})/(?P<micro>\d+)/', 'favicon.views.api'),
(r'^image/(?P<id>.+)', 'favicon.views.image'),
(r'^toggle/(?P<id>.+)', 'favicon.views.toggle_active'),
(r'^$', 'favicon.views.index')
)
| [
[
1,
0,
0.0588,
0.0588,
0,
0.66,
0,
341,
0,
1,
0,
0,
341,
0,
0
],
[
14,
0,
0.5882,
0.8824,
0,
0.66,
1,
990,
3,
10,
0,
0,
75,
10,
1
]
] | [
"from django.conf.urls.defaults import *",
"urlpatterns = patterns('',\n # Example:\n # (r'^16x16/', include('16x16.foo.urls')),\n\n # Uncomment this for admin:\n (r'^send/', 'favicon.views.receiver'),\n (r'^contrib/', 'favicon.views.contrib'),\n (r'^toggler/', 'favicon.views.toggler'),"
] |
#from django.db import models
from google.appengine.ext import db
class Favicon(db.Model):
    """A stored favicon image plus usage metadata (App Engine datastore)."""
    mimetype = db.StringProperty(required=True)      # e.g. "image/png"
    favicon_bytes = db.BlobProperty(required=True)   # raw image bytes
    active = db.BooleanProperty(default=True)        # inactive icons are filtered from listings
    accesses = db.IntegerProperty(default=0)         # serve count, used for "most accessed" ordering
    created_at = db.DateTimeProperty(auto_now_add=True)
    modified_at = db.DateTimeProperty(auto_now=True)
class FaviconColor(db.Model):
favicon = db.ReferenceProperty(Favicon)
r = db.IntegerProperty(default=0)
g = db.IntegerProperty(default=0)
b = db.IntegerProperty(default=0)
class FaviconURI(db.Model):
uri = db.StringProperty(required=True)
favicon = db.ReferenceProperty(Favicon)
created_at = db.DateTimeProperty(auto_now_add=True)
modified_at = db.DateTimeProperty(auto_now=True)
class Client(db.Model):
client_id = db.StringProperty(required=True)
created_at = db.DateTimeProperty(auto_now_add=True)
modified_at = db.DateTimeProperty(auto_now=True)
class Access(db.Model):
created_at = db.DateTimeProperty(auto_now_add=True)
favicon = db.ReferenceProperty(Favicon)
favicon_uri = db.ReferenceProperty(FaviconURI)
client = db.ReferenceProperty(Client)
# stats objects
class CountStat(db.Expando):
count = db.IntegerProperty(default=0)
type = db.StringProperty()
since = db.DateTimeProperty()
class DateCountStat(db.Expando):
date = db.DateTimeProperty()
type = db.StringProperty()
count = db.IntegerProperty(default=0)
| [
[
1,
0,
0.0455,
0.0227,
0,
0.66,
0,
167,
0,
1,
0,
0,
167,
0,
0
],
[
3,
0,
0.1591,
0.1591,
0,
0.66,
0.1429,
148,
0,
0,
0,
0,
697,
0,
6
],
[
14,
1,
0.1136,
0.0227,
1,
... | [
"from google.appengine.ext import db",
"class Favicon(db.Model):\n mimetype = db.StringProperty(required=True)\n favicon_bytes = db.BlobProperty(required=True)\n active = db.BooleanProperty(default=True)\n accesses = db.IntegerProperty(default=0)\n created_at = db.DateTimeProperty(auto_now_add=True)\n modif... |
from google.appengine.ext import db
from google.appengine.ext.db import djangoforms
import django
from django import http
from django import shortcuts
from django.core import serializers
from favicon.models import Favicon, FaviconURI, Client, Access
# as soon as we have an imagine library......
| [
[
1,
0,
0.0909,
0.0909,
0,
0.66,
0,
167,
0,
1,
0,
0,
167,
0,
0
],
[
1,
0,
0.1818,
0.0909,
0,
0.66,
0.1667,
16,
0,
1,
0,
0,
16,
0,
0
],
[
1,
0,
0.3636,
0.0909,
0,
0.... | [
"from google.appengine.ext import db",
"from google.appengine.ext.db import djangoforms",
"import django",
"from django import http",
"from django import shortcuts",
"from django.core import serializers",
"from favicon.models import Favicon, FaviconURI, Client, Access"
] |
from favicon.models import Favicon, FaviconURI, Client, Access, CountStat, DateCountStat
from datetime import datetime
def inc_total_favicons():
    # Bump the site-wide favicon counter kept in a single CountStat entity.
    # NOTE(review): read-modify-write without a transaction -- concurrent
    # requests can lose increments; confirm whether this should run inside
    # db.run_in_transaction (same applies to the sibling inc_* helpers).
    total_favicons = CountStat.get_or_insert("total_favicons")
    total_favicons.count += 1
    total_favicons.put()
def get_total_favicons():
total_favicons = CountStat.get_by_key_name("total_favicons")
return total_favicons.count
def inc_total_accesses():
total_accesses = CountStat.get_or_insert("total_accesses")
total_accesses.count += 1
total_accesses.put()
def get_total_accesses():
total_accesses = CountStat.get_by_key_name("total_accesses")
return total_accesses.count
def inc_today_accesses():
today = datetime.today()
total_accesses_today = DateCountStat.get_or_insert("accesses_%04d%02d%02d" % (today.year, today.month, today.day), date=today)
total_accesses_today.type = "accesses"
total_accesses_today.count += 1
total_accesses_today.put()
def get_today_accesses():
today = datetime.today()
total_accesses_today = DateCountStat.get_by_key_name("accesses_%04d%02d%02d" % (today.year, today.month, today.day))
return total_accesses_today.count
def inc_today_favicons():
today = datetime.today()
total_favicons_today = DateCountStat.get_or_insert("favicons_%04d%02d%02d" % (today.year, today.month, today.day), date=today)
total_favicons_today.type = "favicons"
total_favicons_today.count += 1
total_favicons_today.put()
def get_today_favicons():
today = datetime.today()
total_favicons_today = DateCountStat.get_by_key_name("favicons_%04d%02d%02d" % (today.year, today.month, today.day))
return total_favicons_today.count
def get_num_favicons(num):
counts = DateCountStat.all().filter('type =', 'favicons').order('-date').fetch(num)
return [ str(cnt.count) for cnt in counts ]
def inc_today_updates():
today = datetime.today()
total_updates_today = DateCountStat.get_or_insert("updates_%04d%02d%02d" % (today.year, today.month, today.day), date=today)
total_updates_today.type = "updates"
total_updates_today.count += 1
total_updates_today.put()
def get_today_updates():
today = datetime.today()
total_updates_today = DateCountStat.get_by_key_name("updates_%04d%02d%02d" % (today.year, today.month, today.day))
return total_updates_today.count
# favicon fetch methods
def most_recently_accessed(num):
return Access.all().order('-created_at').fetch(num)
def most_accessed(num):
return Favicon.all().filter('active = ', True).order('-accesses').fetch(num)
def most_recently_added(num):
return Favicon.all().filter('active = ', True).order('-created_at').fetch(num)
def datetime_url(dt):
return "/api/%04d/%02d/%02d/%02d/%02d/%02d/%d/" % (dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, dt.microsecond)
# hack to make key_name begin with a letter
def md5_key(md5):
return 'a' + md5
| [
[
1,
0,
0.0128,
0.0128,
0,
0.66,
0,
211,
0,
6,
0,
0,
211,
0,
0
],
[
1,
0,
0.0256,
0.0128,
0,
0.66,
0.0588,
426,
0,
1,
0,
0,
426,
0,
0
],
[
2,
0,
0.0705,
0.0513,
0,
... | [
"from favicon.models import Favicon, FaviconURI, Client, Access, CountStat, DateCountStat",
"from datetime import datetime",
"def inc_total_favicons():\n total_favicons = CountStat.get_or_insert(\"total_favicons\")\n total_favicons.count += 1\n total_favicons.put()",
" total_favicons = CountStat.get_or_in... |
import logging
import sys
from google.appengine.ext import db
from google.appengine.ext.db import djangoforms
import django
from django import http
from django import shortcuts
from datetime import datetime
from urllib import quote
from favicon.models import Favicon, FaviconURI, Client, Access, CountStat, DateCountStat
from favicon.helpers import *
def image(request, id):
mimetype = "image/png"
favicon = Favicon.get_by_key_name(id)
if favicon.mimetype: mimetype = favicon.mimetype
return http.HttpResponse(favicon.favicon_bytes, mimetype=mimetype)
def receiver(request):
params = {}
# logging.debug("file contents: " + str(request.FILES))
try:
if (len(request.FILES['favicon']['content']) > 51200):
logging.error("someone posted a favicon thats over 50kB (most are < 1kB), no thank you!")
return
# hack, key_name cannot begin with a digit
md5 = md5_key(request.POST['faviconMD5'][:32])
f = Favicon.get_or_insert(md5,
mimetype = request.FILES['favicon']['content-type'],
favicon_bytes = request.FILES['favicon']['content'])
# inc total_favicons
if not f.accesses:
inc_total_favicons()
inc_today_favicons()
f.accesses += 1
f.put()
inc_total_accesses()
inc_today_accesses()
fu = None
if request.POST.has_key('faviconURI'):
fu = FaviconURI(key_name = request.POST['faviconURI'],
uri = request.POST['faviconURI'],
favicon = f)
fu.put()
c = None
if request.POST.has_key('clientID'):
c = Client(key_name = request.POST['clientID'],
client_id = request.POST['clientID'])
c.put()
a = Access(favicon=f, favicon_uri=fu, client=c)
a.put()
except:
logging.error("Unexpected error: " + str(sys.exc_info()))
logging.error(request.FILES)
return shortcuts.render_to_response('index.html', params)
def contrib(request):
params = {}
return shortcuts.render_to_response('contrib.html', params)
def toggle_active(request, id):
favicon = Favicon.get_by_key_name(id)
favicon.active = not favicon.active
favicon.put()
return http.HttpResponseRedirect("/toggler/")
def toggler(request):
params = {}
favicons = Favicon.all().filter('active = ', True)
good_keys = [ f.key().id_or_name() for f in favicons ]
params['favicons_active'] = good_keys
favicons = Favicon.all().filter('active = ', False)
bad_keys = [ f.key().id_or_name() for f in favicons ]
params['favicons_disabled'] = bad_keys
return shortcuts.render_to_response('toggler.html', params)
# TODO: implement charts for data
def get_sparkline(data):
# first stab, not working
max_val = float(max(data))
vals = [ str( (d / max_val)*100 ) for d in data ]
data_string = ",".join(data)
img_string = "http://chart.apis.google.com/chart?chs=100x20&cht=ls&chco=0077CC&chm=B,E6F2FA,0,0,0&chls=1,0,0&chd=t:%s" % (data_string)
return img_string
def top_x(request):
params = {}
params['most_recently_created'] = [ f.key().id_or_name() for f in most_recently_added(10) ]
params['most_recently_accessed'] = [ f.favicon.key().id_or_name() for f in most_recently_accessed(10) ]
params['most_accessed'] = [ f.key().id_or_name() for f in most_accessed(10) ]
params['favicon_cnt'] = get_total_favicons()
params['favicon_today_cnt'] = get_today_favicons()
params['accesses_today_cnt'] = get_today_accesses()
return shortcuts.render_to_response('top.html', params)
def index(request):
params = {}
params['favicon_cnt'] = get_total_favicons()
params['accesses_cnt'] = get_total_accesses()
params['favicon_today_cnt'] = get_today_favicons()
params['accesses_today_cnt'] = get_today_accesses()
return shortcuts.render_to_response('gears.html', params)
def update(request):
inc_today_updates()
return http.HttpResponseRedirect('/update/update.rdf')
def api(request, year, month, day, hour, min, sec, micro):
params = {}
dt = datetime(int(year), int(month), int(day), int(hour), int(min), int(sec), int(micro))
favicons = Favicon.all().filter('active = ', True).filter('created_at > ', dt).order('created_at').fetch(1000)
if not favicons:
params['favicons'] = []
params['next_url'] = datetime_url(dt)
return shortcuts.render_to_response('api.html', params)
keys = [ quote(f.key().id_or_name()) for f in favicons ]
next_url = datetime_url(favicons[-1].created_at)
params['favicons'] = keys
params['next_url'] = next_url
return shortcuts.render_to_response('api.html', params)
| [
[
1,
0,
0.0073,
0.0073,
0,
0.66,
0,
715,
0,
1,
0,
0,
715,
0,
0
],
[
1,
0,
0.0146,
0.0073,
0,
0.66,
0.05,
509,
0,
1,
0,
0,
509,
0,
0
],
[
1,
0,
0.0219,
0.0073,
0,
0.... | [
"import logging",
"import sys",
"from google.appengine.ext import db",
"from google.appengine.ext.db import djangoforms",
"import django",
"from django import http",
"from django import shortcuts",
"from datetime import datetime",
"from urllib import quote",
"from favicon.models import Favicon, Fa... |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2012 Zdenko Podobný
# Author: Zdenko Podobný
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Simple python demo script of tesseract-ocr 3.02 c-api
"""
import os
import sys
import ctypes
# Demo variables
lang = "eng"
filename = "../phototest.tif"
libpath = "/usr/local/lib64/"
libpath_w = "../vs2008/DLL_Release/"
TESSDATA_PREFIX = os.environ.get('TESSDATA_PREFIX')
if not TESSDATA_PREFIX:
TESSDATA_PREFIX = "../"
if sys.platform == "win32":
libname = libpath_w + "libtesseract302.dll"
libname_alt = "libtesseract302.dll"
os.environ["PATH"] += os.pathsep + libpath_w
else:
libname = libpath + "libtesseract.so.3.0.2"
libname_alt = "libtesseract.so.3"
try:
tesseract = ctypes.cdll.LoadLibrary(libname)
except:
try:
tesseract = ctypes.cdll.LoadLibrary(libname_alt)
except WindowsError, err:
print("Trying to load '%s'..." % libname)
print("Trying to load '%s'..." % libname_alt)
print(err)
exit(1)
tesseract.TessVersion.restype = ctypes.c_char_p
tesseract_version = tesseract.TessVersion()[:4]
# We need to check library version because libtesseract.so.3 is symlink
# and can point to other version than 3.02
if float(tesseract_version) < 3.02:
print("Found tesseract-ocr library version %s." % tesseract_version)
print("C-API is present only in version 3.02!")
exit(2)
api = tesseract.TessBaseAPICreate()
rc = tesseract.TessBaseAPIInit3(api, TESSDATA_PREFIX, lang);
if (rc):
tesseract.TessBaseAPIDelete(api)
print("Could not initialize tesseract.\n")
exit(3)
text_out = tesseract.TessBaseAPIProcessPages(api, filename, None , 0);
result_text = ctypes.string_at(text_out)
print result_text
| [
[
1,
0,
0.25,
0.25,
0,
0.66,
0,
688,
0,
1,
0,
0,
688,
0,
0
],
[
1,
0,
0.5,
0.25,
0,
0.66,
0.5,
509,
0,
1,
0,
0,
509,
0,
0
],
[
1,
0,
0.75,
0.25,
0,
0.66,
1,
... | [
"import os",
"import sys",
"import ctypes"
] |
'''
Created on Oct 16, 2012
Refactored on Jul 4, 2013
@author: Nils Amiet
'''
import time
import math
from contextlib import contextmanager
@contextmanager
def timer():
'''Context manager used to wrap some code with a timer and print the execution time at the end'''
timer = Timer()
timer.start()
yield
timer.stop()
print(timer)
class Timer:
'''Class used to measure the execution time of some code'''
MILLIS_PER_SECOND = 1000
MILLIS_PER_MINUTE = MILLIS_PER_SECOND * 60
MILLIS_PER_HOUR = MILLIS_PER_MINUTE * 60
MILLIS_PER_DAY = MILLIS_PER_HOUR * 24
def __init__(self):
self.startMillis = 0
self.endMillis = 0
self.totalTimeMillis = 0
def start(self):
self.startMillis = int(time.time() * self.MILLIS_PER_SECOND)
def stop(self):
self.endMillis = int(time.time() * self.MILLIS_PER_SECOND)
self.totalTimeMillis = self.endMillis - self.startMillis
def __repr__(self):
tempTime = self.totalTimeMillis
days = math.floor(tempTime / self.MILLIS_PER_DAY)
tempTime -= days * self.MILLIS_PER_DAY
hours = math.floor(tempTime / self.MILLIS_PER_HOUR)
tempTime -= hours * self.MILLIS_PER_HOUR
minutes = math.floor(tempTime / self.MILLIS_PER_MINUTE)
tempTime -= minutes * self.MILLIS_PER_MINUTE
seconds = math.floor(tempTime / self.MILLIS_PER_SECOND)
tempTime -= seconds * self.MILLIS_PER_SECOND
millis = tempTime
timeString = "%s days, %s hours, %s minutes, %s seconds, %s millis."
timeString %= (days, hours, minutes, seconds, millis)
return timeString | [
[
8,
0,
0.0565,
0.0968,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.129,
0.0161,
0,
0.66,
0.2,
654,
0,
1,
0,
0,
654,
0,
0
],
[
1,
0,
0.1452,
0.0161,
0,
0.66,
... | [
"'''\nCreated on Oct 16, 2012\nRefactored on Jul 4, 2013\n\n@author: Nils Amiet\n'''",
"import time",
"import math",
"from contextlib import contextmanager",
"def timer():\n '''Context manager used to wrap some code with a timer and print the execution time at the end'''\n timer = Timer()\n timer.s... |
'''
Created on 11 juin 2013
@author: Nils Amiet
'''
from subprocess import Popen, PIPE
import os
class SentiStrength():
'''Wrapper class for SentiStrength java version'''
RUN_COMMAND = "java -jar"
SENTISTRENGTH_PATH = os.path.join(os.path.dirname(__file__), "SentiStrengthCom.jar")
DATA_PATH = os.path.join(os.path.dirname(__file__), "SentStrength_Data_Sept2011/")
def __init__(self):
pass
def classifiy(self, text):
commandArgs = "%s %s sentidata %s cmd" % (self.RUN_COMMAND, self.SENTISTRENGTH_PATH, self.DATA_PATH)
commandArgs = commandArgs.split(" ")
process = Popen(commandArgs, stdin=PIPE, stdout=PIPE, stderr=PIPE)
text = text.replace(" ", "+")
classification, dummy = process.communicate(text.encode("utf-8"))
polarities = classification.split("\n")
polarities = [self.polarity(line.strip()) for line in polarities]
polarities = [x for x in polarities if x is not None]
return polarities
def polarity(self, line):
val = line.split(" ")
try:
val = [int(x) for x in val]
except:
return None
return sum(val) | [
[
8,
0,
0.0732,
0.122,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.1707,
0.0244,
0,
0.66,
0.3333,
394,
0,
2,
0,
0,
394,
0,
0
],
[
1,
0,
0.1951,
0.0244,
0,
0.66,... | [
"'''\nCreated on 11 juin 2013\n\n@author: Nils Amiet\n'''",
"from subprocess import Popen, PIPE",
"import os",
"class SentiStrength():\n '''Wrapper class for SentiStrength java version'''\n \n RUN_COMMAND = \"java -jar\"\n SENTISTRENGTH_PATH = os.path.join(os.path.dirname(__file__), \"SentiStrengt... |
from django.conf.urls import patterns, include, url
# Uncomment the next two lines to enable the admin:
#from django.contrib import admin
#admin.autodiscover()
urlpatterns = patterns('ITInfluence.views',
# Examples:
# url(r'^$', 'InfrarougeTwitterInfluence.views.home', name='home'),
# url(r'^InfrarougeTwitterInfluence/', include('InfrarougeTwitterInfluence.foo.urls')),
# Uncomment the admin/doc line below to enable admin documentation:
# url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
# url(r'^admin/', include(admin.site.urls)),
url(r'^$', 'index'),
url(r'^twitter/hashtags/$', 'twitterBrowseHashtags'),
url(r'^twitter/hashtag/(?P<hashtag>.+)/$', 'twitterHashtag'),
url(r'^twitter/$', 'twitterStats'),
url(r'^infrarouge/$', 'infrarougeStats'),
url(r'^twitter/tweets/$', 'twitterBrowseTweets'),
url(r'^twitter/collect/$', 'twitterShowCollectForm'),
url(r'^twitter/stopStreaming/$', 'twitterStopStreaming'),
url(r'^twitter/collect/toggleFollowersCollection/$', 'twitterToggleCollectingFollowers'),
url(r'^infrarouge/forums/$', 'getInfrarougeForums'),
url(r'^twitter/followers-ranking/$', 'twitterFollowersCountRanking'),
# infrarouge images
url(r'^infrarouge/images/ndi/$', 'getInfrarougeNDI'),
url(r'^infrarouge/images/ndi-time/$', 'getInfrarougeNDITimeFigure'),
url(r'^infrarouge/images/ndi-replies-count/$', 'getInfrarougeNDIReplyCountFigure'),
url(r'^infrarouge/images/replies-graph/$', 'getInfrarougeRepliesGraph'),
url(r'^infrarouge/images/user-discussion-graph/$', 'getInfrarougeUserDiscussionGraph'),
# twitter images
url(r'^twitter/images/ndi/$', 'getTwitterNDI'),
url(r'^twitter/images/ndi/(?P<hashtag>.+)/$', 'getTwitterNDISingleHashtag'),
url(r'^twitter/images/ndi-time/$', 'getTwitterNDITimeFigure'),
url(r'^twitter/images/ndi-replies-count/$', 'getTwitterNDIReplyCountFigure'),
url(r'^twitter/images/replies-graph/$', 'getTwitterRepliesGraph'),
url(r'^twitter/images/followers-graph/$', 'getTwitterFollowersGraph'),
# tools
url(r'^tools/showimage/(?P<path>.+)$', 'showImage'),
)
| [
[
1,
0,
0.0217,
0.0217,
0,
0.66,
0,
528,
0,
3,
0,
0,
528,
0,
0
],
[
14,
0,
0.5761,
0.8696,
0,
0.66,
1,
990,
3,
24,
0,
0,
75,
10,
24
]
] | [
"from django.conf.urls import patterns, include, url",
"urlpatterns = patterns('ITInfluence.views',\n # Examples:\n # url(r'^$', 'InfrarougeTwitterInfluence.views.home', name='home'),\n # url(r'^InfrarougeTwitterInfluence/', include('InfrarougeTwitterInfluence.foo.urls')),\n\n # Uncomment the admin/do... |
# Django settings for InfrarougeTwitterInfluence project.
import os
PROJECT_PATH = os.path.realpath(os.path.join(os.path.dirname(__file__), os.path.pardir))
DEBUG = False
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
DATABASES = {
'infrarouge': { # SQLite database for Infrarouge data
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': os.path.join(PROJECT_PATH, '../InfrarougeGrabber/infrarouge.db'),
# The following settings are not used with sqlite3:
'USER': '',
'PASSWORD': '',
'HOST': '', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
'PORT': '', # Set to empty string for default.
},
'default': { # MySQL database for Twitter data
'ENGINE': 'django.db.backends.mysql',
'NAME': 'SocialInfluence',
'USER': 'infrarouge',
'PASSWORD': 'infrarouge',
'HOST': '127.0.0.1',
'PORT': '3306',
},
}
DATABASE_ROUTERS = [
'ITInfluence.DatabaseRouters.TwitterRouter',
]
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = ["*"]
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = '(n1hz*0tl2p--qf@mz*7g6r%5z#lm*gx!-d9cu=ebu$ameht=^'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'InfrarougeTwitterInfluence.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'InfrarougeTwitterInfluence.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(PROJECT_PATH, "templates"),
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
# 'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
'ITInfluence',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
| [
[
1,
0,
0.0114,
0.0057,
0,
0.66,
0,
688,
0,
1,
0,
0,
688,
0,
0
],
[
14,
0,
0.0229,
0.0057,
0,
0.66,
0.0357,
660,
3,
1,
0,
0,
45,
10,
3
],
[
14,
0,
0.0343,
0.0057,
0,
... | [
"import os",
"PROJECT_PATH = os.path.realpath(os.path.join(os.path.dirname(__file__), os.path.pardir))",
"DEBUG = False",
"TEMPLATE_DEBUG = DEBUG",
"ADMINS = (\n # ('Your Name', 'your_email@example.com'),\n)",
"MANAGERS = ADMINS",
"DATABASES = {\n 'infrarouge': { # SQLite database for Infrarouge d... |
"""
WSGI config for InfrarougeTwitterInfluence project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "InfrarougeTwitterInfluence.settings"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "InfrarougeTwitterInfluence.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| [
[
8,
0,
0.25,
0.4688,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.5,
0.0312,
0,
0.66,
0.25,
688,
0,
1,
0,
0,
688,
0,
0
],
[
8,
0,
0.6875,
0.0312,
0,
0.66,
0... | [
"\"\"\"\nWSGI config for InfrarougeTwitterInfluence project.\n\nThis module contains the WSGI application used by Django's development server\nand any production WSGI deployments. It should expose a module-level variable\nnamed ``application``. Django's ``runserver`` and ``runfcgi`` commands discover\nthis applicat... |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "InfrarougeTwitterInfluence.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| [
[
1,
0,
0.2,
0.1,
0,
0.66,
0,
688,
0,
1,
0,
0,
688,
0,
0
],
[
1,
0,
0.3,
0.1,
0,
0.66,
0.5,
509,
0,
1,
0,
0,
509,
0,
0
],
[
4,
0,
0.75,
0.6,
0,
0.66,
1,
0,
... | [
"import os",
"import sys",
"if __name__ == \"__main__\":\n os.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"InfrarougeTwitterInfluence.settings\")\n\n from django.core.management import execute_from_command_line\n\n execute_from_command_line(sys.argv)",
" os.environ.setdefault(\"DJANGO_SETTINGS... |
'''
Created on 8 juin 2013
@author: Nils Amiet
'''
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import networkx as nx
import math
import random
from django.http.response import HttpResponse
class GraphPlotter():
'''Class used to plot networkx graphs with matplotlib'''
def __init__(self, dpi=100, width=1600, height=900):
self.dpi = dpi
self.width = width
self.height = height
def setFigureSize(self, figure):
w = int(self.width/self.dpi)
h = int(self.height/self.dpi)
figure.set_size_inches(w, h)
def bipartiteNodePositions(self, graph):
'''Compute layout with nice node positions for a bipartite graph'''
bipartiteAttributes = nx.get_node_attributes(graph, "bipartite")
partitionA = [node for node in graph.nodes() if bipartiteAttributes[node] is 0] # bipartite=0
partitionB = [node for node in graph.nodes() if bipartiteAttributes[node] is not 0] # bipartite=1
pos = {}
for node in partitionA:
xCoord = random.uniform(0, 0.2) # random position on the left side
yCoord = random.uniform(0, 1)
pos[node] = [xCoord, yCoord]
for node in partitionB:
xCoord = random.uniform(0.8, 1) # random position on the right side
yCoord = random.uniform(0, 1)
pos[node] = [xCoord, yCoord]
return pos
def memoryPlot(self, graph, bipartite=False, pos=None, nodeSizes=None, nodeColor='r', nodeLabel="Nodes", nodeLabel2="Nodes 2", edgeLabel="Edges"):
'''Plots the network using matplotlib'''
if pos is None:
# pos = nx.spring_layout(graph)
pos = nx.random_layout(graph)
nodeSize = 25
edgeWidth = 0.5
if nodeSizes is not None:
nodeSize = nodeSizes
figure = plt.figure()
rect = (0,0,1,1)
ax = figure.add_axes(rect)
matplotlib.pyplot.axis("off")
if bipartite:
bipartiteAttributes = nx.get_node_attributes(graph, "bipartite")
users = [node for node in graph.nodes() if bipartiteAttributes[node] is 0] # bipartite=0
discussions = [node for node in graph.nodes() if bipartiteAttributes[node] is not 0] # bipartite=1
nx.draw_networkx_nodes(graph, ax=ax, pos=pos, node_size=nodeSize, nodelist=users, label=nodeLabel)
nx.draw_networkx_nodes(graph, ax=ax, pos=pos, node_size=nodeSize, nodelist=discussions, node_color='b', label=nodeLabel2)
nx.draw_networkx_edges(graph, ax=ax, pos=pos, width=edgeWidth, label=edgeLabel)
userCount = len(users)
discussionCount = len(discussions)
edgeCount = len(graph.edges())
graphInfo = str() + "%s users\n%s discussions\n%s edges\n" % (userCount, discussionCount, edgeCount)
figure.text(0,0, graphInfo)
else:
nx.draw_networkx_nodes(graph, ax=ax, pos=pos, node_size=nodeSize, nodelist=graph.nodes(), node_color=nodeColor, label=nodeLabel)
nx.draw_networkx_edges(graph, ax=ax, pos=pos, width=edgeWidth, label=edgeLabel)
nodeCount = len(graph.nodes())
edgeCount = len(graph.edges())
graphInfo = str() + "%s nodes \n%s edges\n" % (nodeCount, edgeCount)
figure.text(0,0, graphInfo)
try:
matplotlib.pyplot.legend()
except:
print("Warning: drawing legend failed")
response = HttpResponse(content_type="image/png")
self.setFigureSize(figure)
figure.savefig(response, format='png', dpi=self.dpi, bbox_inches='tight')
return response
class TwoDimensionalValuesPlotter():
'''Class used to plot 2D datasets with matplotlib'''
def __init__(self, dpi=100, width=1600, height=900):
self.dpi = dpi
self.width = width
self.height = height
def setFigureSize(self, figure):
w = int(self.width/self.dpi)
h = int(self.height/self.dpi)
figure.set_size_inches(w, h)
def plot(self, xValues, yValues, filename, xlabel, ylabel):
figure = plt.figure()
subplot = figure.add_subplot(1, 1, 1)
subplot.set_xlabel(xlabel)
subplot.set_ylabel(ylabel)
subplot.grid(True)
subplot.plot(xValues, yValues)
self.setFigureSize(figure)
plt.savefig(filename, dpi=self.dpi)
def memoryPlotMultipleDatasets(self, datasets, xlabel, ylabel):
'''Plots multiple curves on the same chart'''
figure = plt.figure()
datasetCount = len(datasets)
width = math.ceil(math.sqrt(datasetCount))
for datasetId, dataset in enumerate(datasets):
subplot = figure.add_subplot(width, width, datasetId)
subplot.set_xlabel(xlabel)
subplot.set_ylabel(ylabel)
subplot.grid(True)
title = dataset[0]
xValues = dataset[1][0]
yValues = dataset[1][1]
subplot.set_title(title)
subplot.plot(xValues, yValues)
# plot title/axis/labels font size
for item in ([subplot.title, subplot.xaxis.label, subplot.yaxis.label] + subplot.get_xticklabels() + subplot.get_yticklabels()):
item.set_fontsize(8)
figure.tight_layout()
response = HttpResponse(content_type="image/png")
self.setFigureSize(figure)
figure.savefig(response, format='png')
return response
def memoryPlotMultipleDatasetsMultidimensional(self, datasetsList, xlabel, ylabel):
'''Plots multiple subcharts on the same chart'''
figure = plt.figure()
datasets = zip(*datasetsList)
legendData = [1,2] # dummy values
datasetCount = len(datasets)
width = math.ceil(math.sqrt(datasetCount))
for datasetId, datasetTuple in enumerate(datasets):
subplot = figure.add_subplot(width, width, datasetId)
subplot.set_xlabel(xlabel)
subplot.set_ylabel(ylabel)
subplot.grid(True)
title = datasetTuple[0][0]
for num, dataset in enumerate(datasetTuple):
xValues = dataset[1][0]
yValues = dataset[1][1]
subplot.set_title(title)
legendData[num], = subplot.plot(xValues, yValues, label=num)
# plot title/axis/labels font size
for item in ([subplot.title, subplot.xaxis.label, subplot.yaxis.label] + subplot.get_xticklabels() + subplot.get_yticklabels()):
item.set_fontsize(8)
figure.tight_layout()
figure.legend(legendData, ["equal time intervals", "equal reply count intervals"], loc="lower left", prop={"size": 6})
response = HttpResponse(content_type="image/png")
self.setFigureSize(figure)
figure.savefig(response, format='png')
return response | [
[
8,
0,
0.0153,
0.0255,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.0357,
0.0051,
0,
0.66,
0.1111,
75,
0,
1,
0,
0,
75,
0,
0
],
[
8,
0,
0.0408,
0.0051,
0,
0.66,
... | [
"'''\nCreated on 8 juin 2013\n\n@author: Nils Amiet\n'''",
"import matplotlib",
"matplotlib.use(\"Agg\")",
"import matplotlib.pyplot as plt",
"import networkx as nx",
"import math",
"import random",
"from django.http.response import HttpResponse",
"class GraphPlotter():\n '''Class used to plot ne... |
# coding: utf-8
'''
Created on 18 juin 2013
@author: Nils Amiet
'''
import nltk
def isEnglishTweet(text):
'''
Checks that the ratio of unknown words in the given text does not exceed a threshold.
Words are checked against an English dictionary of 235k words provided by NLTK
'''
filterList = ["#", "RT", ".", ":", ",", ";", "'", "(", ")", "{", "}", "[", "]", "~", "\"", "?", "!"]
for sign in filterList:
text = text.replace(sign, "")
text = [word for word in text.split(" ") if not word.startswith("http") and not word.startswith("@")]
englishWords = set(w.lower() for w in nltk.corpus.words.words())
textWords = set(w.lower() for w in text)
unknownWords = textWords - englishWords
unknownCount = len(unknownWords)
textCount = len(textWords)
unknownFraction = unknownCount / float(textCount)
threshold = 0.5
return unknownFraction <= threshold | [
[
8,
0,
0.125,
0.1562,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.25,
0.0312,
0,
0.66,
0.5,
371,
0,
1,
0,
0,
371,
0,
0
],
[
2,
0,
0.6562,
0.7188,
0,
0.66,
... | [
"'''\nCreated on 18 juin 2013\n\n@author: Nils Amiet\n'''",
"import nltk",
"def isEnglishTweet(text):\n '''\n Checks that the ratio of unknown words in the given text does not exceed a threshold.\n Words are checked against an English dictionary of 235k words provided by NLTK\n '''\n filterList =... |
from django.db import models
import django.db.models.options as options
options.DEFAULT_NAMES = options.DEFAULT_NAMES + ('isTwitterModel',)
# Create your models here.
'''Twitter models'''
class Tweet(models.Model):
class Meta():
isTwitterModel = True
id = models.DecimalField(primary_key=True, max_digits=20, decimal_places=0)
in_reply_to_status_id = models.DecimalField(max_digits=20, decimal_places=0)
user = models.ForeignKey("User")
text = models.TextField()
created_at = models.DateTimeField()
polarity = models.FloatField()
polarity_ready = models.BooleanField()
hashtags = models.TextField()
class User(models.Model):
class Meta():
isTwitterModel = True
id = models.DecimalField(primary_key=True, max_digits=20, decimal_places=0)
screen_name = models.TextField()
statuses_count = models.DecimalField(max_digits=20, decimal_places=0)
friends_count = models.DecimalField(max_digits=20, decimal_places=0)
followers_count = models.DecimalField(max_digits=20, decimal_places=0)
lang = models.TextField()
followers_ready = models.BooleanField()
user_ready = models.BooleanField()
class Friendship(models.Model):
class Meta():
isTwitterModel = True
# user follows followed_user
user = models.ForeignKey("User", related_name="twitter_user_follow_source")
followed_user = models.ForeignKey("User", related_name="twitter_user_follow_destination")
'''Infrarouge models'''
class InfrarougeUser(models.Model):
class Meta():
db_table = "user"
id = models.IntegerField(primary_key=True)
name = models.TextField()
class InfrarougeForum(models.Model):
    # Maps the ``forum`` table of the Infrarouge database.
    class Meta():
        db_table = "forum"
    id = models.IntegerField(primary_key=True)
    title = models.TextField()
    description = models.TextField()
class InfrarougeForumThread(models.Model):
    # Maps the ``forumthread`` table; a thread belongs to a forum and an author.
    class Meta():
        db_table = "forumthread"
    id = models.IntegerField(primary_key=True)
    title = models.TextField()
    description = models.TextField()
    url = models.TextField()
    fkparentforum = models.ForeignKey("InfrarougeForum")
    fkauthor = models.ForeignKey("InfrarougeUser")
class InfrarougeThreadMessage(models.Model):
    # Maps the ``threadmessage`` table; one post inside a forum thread.
    class Meta():
        db_table = "threadmessage"
    id = models.IntegerField(primary_key=True)
    message = models.TextField()
    fkforumthread = models.ForeignKey("InfrarougeForumThread")
    fkauthor = models.ForeignKey("InfrarougeUser")
    timestamp = models.DateTimeField()
    # Sentiment score computed offline.
    polarity = models.FloatField()
# does not work because Django doesn't support tables without primary keys or tables with composed primary key (more than 1 column)
class InfrarougeReply(models.Model):
    # Maps the ``reply`` table: a directed from->to reply on a thread message.
    # NOTE(review): per the comment above, the real table has no usable single
    # primary key; this mapping assumes a surrogate ``id`` -- confirm before use.
    class Meta():
        db_table = "reply"
    id = models.IntegerField(primary_key=True)
    fkfrom = models.ForeignKey("InfrarougeUser", related_name="infrarougeuser_infrarougereply_from")
    fkto = models.ForeignKey("InfrarougeUser", related_name="infrarougeuser_infrarougereply_to")
    fkthreadmessage = models.ForeignKey("InfrarougeThreadMessage")
[
1,
0,
0.0112,
0.0112,
0,
0.66,
0,
40,
0,
1,
0,
0,
40,
0,
0
],
[
1,
0,
0.0225,
0.0112,
0,
0.66,
0.0833,
962,
0,
1,
0,
0,
962,
0,
0
],
[
14,
0,
0.0449,
0.0112,
0,
0... | [
"from django.db import models",
"import django.db.models.options as options",
"options.DEFAULT_NAMES = options.DEFAULT_NAMES + ('isTwitterModel',)",
"'''Twitter models'''",
"class Tweet(models.Model):\n class Meta():\n isTwitterModel = True\n \n id = models.DecimalField(primary_key=True,... |
# coding: utf-8
'''
Created on 4 jul 2013
@author: Nils Amiet
'''
import unittest
import os, sys
''' Required for running the script from anywhere outside eclipse'''
os.chdir(os.path.dirname(os.path.realpath(__file__)))
sys.path.append('..')
from SentiStrength.sentistrength import SentiStrength
from Tools.Timer import Timer
from ITInfluence.language import isEnglishTweet
from ITInfluence.polarity import PolarityCounter
class SentiStrengthTest(unittest.TestCase):
    '''Tests for SentiStrength'''

    def testPerformance(self):
        # SentiStrength's authors claim ~16k texts annotated per second; build
        # that many copies of one sentence and time a single classification run.
        line = "Even though it looked good at first, this movie is utterly bad."
        text = line
        textCount = 16000 # claims to annotate 16k texts in a second
        for unused in range(textCount):
            text += "\n" + line
        timer = Timer()
        timer.start()
        s = SentiStrength()
        # NOTE(review): "classifiy" is the (misspelled) method name exposed by
        # the SentiStrength wrapper -- do not "fix" the call site here.
        unused = s.classifiy(text)
        timer.stop()
        # Allow some margin because 16k is not many, it works best with larger amounts.
        # For instance with 160k the relative margin is much smaller.
        margin = 1000
        expectedTime = 1000 + margin
        experimentalValue = timer.totalTimeMillis <= expectedTime
        expectedValue = True
        errorMessage = "Took %s millis." % timer.totalTimeMillis
        self.assertEqual(experimentalValue, expectedValue, errorMessage)
class LanguageTest(unittest.TestCase):
    '''Tests for the language filter module'''

    def testTexts(self):
        # Fixture: (expected_is_english, text) pairs covering plain sentences,
        # real English tweets, and foreign-language tweets (FR, TR, JA).
        texts = [
            (False, "Je me promène le long de la côte sud-ouest de l'afrique"),
            (True, "I live in New York because I think it's the most beautiful city in the world"),
            (False, "Je t'aime"),
            (True, "I love you"),
            # English tweets
            (True, "RT @bidon Beau gave Luke a hickey.. Really shouldn't be jealous..@BrooksBeau @luke_brooks WANT ONE:( http://t.co/0uBKDDZSIB"),
            (True, "#GGM I love socks too ,me Too. @AHuston_FanPage @NicolasCageFake @elderkeeper @AlbusDumby7 @mondesdegwenn @J0HNNYDepp http://t.co/OsTX4MdxvY"),
            (True, "RT @mashable: Don't Bother Following the Fiat Abarth 500 (@Abarth500_DE) on Twitter — It's Too Damn Fast http://t.co/5XopBSt4IS #innovative"),
            # Foreign tweets
            (False, "Amacın 2 tane ağaçtan çok daha derin olduğu ortada ama TR bölünmez! @yigitbulutt http://t.co/7t3vHf7VDd"),
            (False, "RT @MlaniX1: Voila la photo pour vous remercier grâce a vous je suis a plus de 500 followers merci beaucoup <3 http://t.co/Uv5YXmPuvM"),
            (False, "RT @ta9ton: 不覚にもイオン閉店に間に合わず隔離された僕は… http://t.co/zRnOnpTFVN"),
        ]
        # We'd like to have the full debug in case more than one text fails
        exception = None
        for expectedValue, text in texts:
            experimentalValue = isEnglishTweet(text)
            errorMessage = "False classification of text %s" % text
            try:
                self.assertEqual(expectedValue, experimentalValue, errorMessage)
            except Exception as e:
                print(e)
                exception = e
        # Re-raise the last failure so the test still fails after printing all.
        if exception is not None:
            raise exception
class PolarityTests(unittest.TestCase):
    '''Tests for polarity counting'''

    def setUp(self):
        # Small reply graph: users 4<->5 exchange mixed-polarity replies,
        # 4->2 is negative, 2->5 is positive.
        r1 = {"from": 4, "to": 5, "polarity": 0.8}
        r2 = {"from": 5, "to": 4, "polarity": 0.2}
        r3 = {"from": 4, "to": 2, "polarity": -0.3}
        r4 = {"from": 2, "to": 5, "polarity": 0.1}
        self.replies = [r1, r2, r3, r4]

    def testPolarity(self):
        polarityCounter = PolarityCounter(self.replies)
        ranking = polarityCounter.polarityDifferenceRanking()
        experimentalValue = ranking[0][0]
        # node 2 should be first because it has the greatest polarity difference
        expectedValue = 2
        errorMessage = "Incorrect ranking. User %s should be first" % expectedValue
        self.assertEqual(experimentalValue, expectedValue, errorMessage)

    def testNDI(self):
        polarityCounter = PolarityCounter(self.replies)
        experimentalValue = polarityCounter.NDI
        # NDI = sum of squares of differences
        # = (0.5)^2 + (0.5)^2 + (1.0)^2
        # = 0.25 + 0.25 + 1
        # = 1.5
        expectedValue = 1.5
        errorMessage = "Wrong NDI value. Expected: %s, experimental: %s" % (expectedValue, experimentalValue)
        self.assertEqual(experimentalValue, expectedValue, errorMessage)
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
[
8,
0,
0.0336,
0.042,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.084,
0.0084,
0,
0.66,
0.0769,
88,
0,
1,
0,
0,
88,
0,
0
],
[
1,
0,
0.0924,
0.0084,
0,
0.66,
... | [
"'''\nCreated on 4 jul 2013\n\n@author: Nils Amiet\n'''",
"import unittest",
"import os, sys",
"''' Required for running the script from anywhere outside eclipse'''",
"os.chdir(os.path.dirname(os.path.realpath(__file__)))",
"sys.path.append('..')",
"from SentiStrength.sentistrength import SentiStrength"... |
'''
Created on 13 juin 2013
@author: Nils Amiet
'''
from ITInfluence.models import Tweet
def getAllHashtags():
    '''Return a list of (hashtag, count) tuples over all tweets with hashtags.

    Counts every occurrence across the Tweet table; tags are taken verbatim
    (case-sensitive) from the space-separated ``hashtags`` field.
    '''
    tweets = Tweet.objects.all().exclude(hashtags="")
    hashtags = {}
    for tweet in tweets:
        for tag in tweet.hashtags.split(" "):
            # dict.get replaces the original try/except-per-tag counting.
            hashtags[tag] = hashtags.get(tag, 0) + 1
    return list(hashtags.items())
def getSimilarHashtags(hashtag):
    '''Return (tag, count) tuples for hashtags co-occurring with ``hashtag``.'''
    similarHashtags = {}
    tweets = Tweet.objects.all().exclude(hashtags="").filter(hashtags__contains=hashtag)
    for tweet in tweets:
        currentTweetHashtags = tweet.hashtags.split(" ")
        # Drop the query tag itself. It may be absent: the ``__contains``
        # lookup also matches tags that merely contain the query string.
        try:
            currentTweetHashtags.remove(hashtag)
        except ValueError:  # narrowed from a bare except: remove() raises ValueError
            pass
        for tag in currentTweetHashtags:
            similarHashtags[tag] = similarHashtags.get(tag, 0) + 1
    return list(similarHashtags.items())
return similarHashtags | [
[
8,
0,
0.0714,
0.119,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.1667,
0.0238,
0,
0.66,
0.3333,
316,
0,
1,
0,
0,
316,
0,
0
],
[
2,
0,
0.369,
0.3333,
0,
0.66,
... | [
"'''\nCreated on 13 juin 2013\n\n@author: Nils Amiet\n'''",
"from ITInfluence.models import Tweet",
"def getAllHashtags():\n tweets = Tweet.objects.all().exclude(hashtags=\"\")\n hashtags = {}\n \n for tweet in tweets:\n currentTweetHashtags = tweet.hashtags.split(\" \")\n for tag in c... |
'''
Created on 10 juin 2013
@author: Nils Amiet
'''
import threading
from ITInfluence.models import User, Friendship
from ITInfluence.twitter import TwitterREST
rest = TwitterREST()
def collectRequest(query):
    # Estimate collection time for a raw SQL query whose single row exposes a
    # ``req`` column = number of Twitter API calls needed.
    # Returns a (days, hours, minutes) tuple.
    userRequestPerMinute = 1        # follower-id API budget: 15 calls / 15 min
    followersPerRequest = 5000      # Twitter returns at most 5000 ids per call
    val = User.objects.raw(query, [followersPerRequest])
    req = val[0].req
    try:
        minutes = req / userRequestPerMinute
        hours = minutes / 60
        days = hours / 24
        return days, hours, minutes
    except:
        # req is NULL/None when no users match the query -- report zero time.
        return 0, 0, 0
def getTotalTimeToCollectFollowers():
    '''Returns an estimation of the time required to collect all the followers for the users in the database'''
    # The %s placeholder is the follower-ids-per-API-call limit (5000); the
    # ceiling division counts API calls per user. Returns (days, hours, minutes).
    query = """
    SELECT id, sum(IF(followers_count, ceiling(followers_count/%s), 1)) as req
    FROM ITInfluence_user
    WHERE followers_count > 0
    """
    return collectRequest(query)
def getRemainingTimeToCollectFollowers():
    '''Returns an estimation of the time required to collect the followers for the users that we didnt get the followers yet.'''
    # Same computation as getTotalTimeToCollectFollowers, restricted to users
    # whose followers have not been fetched yet. Returns (days, hours, minutes).
    query = """
    SELECT id, sum(IF(followers_count, ceiling(followers_count/%s), 1)) as req
    FROM ITInfluence_user
    WHERE followers_ready=0
    """
    return collectRequest(query)
def collectFollowersData():
    '''Collect the followers for all users in the database that have not their followers collected yet.

    Long-running worker: checks rest.isCollectingFollowers between steps so it
    can be stopped cooperatively. Marks each user followers_ready on success.
    '''
    print("Started collecting followers.")
    users = User.objects.all().filter(followers_ready=False).filter(followers_count__gte=1)
    for user in users:
        # stop collecting if asked
        if not rest.isCollectingFollowers:
            return
        followers = rest.getFollowers(int(user.id), getAll=True) # rate limited 15 every 15 minutes
        # getFollowers returns None when collection was aborted mid-fetch.
        if followers is None:
            print("Stopped collecting followers.")
            return # stop collecting
        print("Collected %s followers" % len(followers))
        print("Inserting collected followers into database...")
        for follower in followers:
            # stop collecting if asked
            if not rest.isCollectingFollowers:
                print("Stopped collecting followers.")
                return
            source = user
            try:
                destination = User.objects.get(id=follower)
            except:
                # Unknown follower: create a stub row instead of fetching the
                # full profile (createUser) to save API budget. followers_ready
                # is set True so the stub is not queued for follower collection.
                # destination = createUser(follower)
                destination = User(id=follower)
                destination.followers_ready = True
                destination.user_ready = False
                destination.statuses_count = 0
                destination.friends_count = 0
                destination.followers_count = 0
                destination.save()
            friendship = Friendship(user=destination, followed_user=source)
            friendship.save()
        # Only mark the user done after all followers were inserted.
        user.followers_ready = True
        user.save()
        print("...done inserting followers for this user!")
    print("Stopped collecting followers.")
    rest.isCollectingFollowers = False
def runFollowersCollection():
    '''Launch follower collection on a daemon thread so it never blocks shutdown.'''
    worker = threading.Thread(target=collectFollowersData)
    worker.daemon = True
    worker.start()
def toggleFollowersCollection():
    '''Flip the follower-collection flag; (re)start the worker when enabling.'''
    enabled = not rest.isCollectingFollowers
    rest.isCollectingFollowers = enabled
    if enabled:
        runFollowersCollection()
def createUser(userId):
    '''Fetch a full user profile from the REST API (rate limited: 180 per 15 min).'''
    return rest.getUser(userId)
| [
[
8,
0,
0.0278,
0.0463,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.0648,
0.0093,
0,
0.66,
0.0909,
83,
0,
1,
0,
0,
83,
0,
0
],
[
1,
0,
0.0833,
0.0093,
0,
0.66,
... | [
"'''\nCreated on 10 juin 2013\n\n@author: Nils Amiet\n'''",
"import threading",
"from ITInfluence.models import User, Friendship",
"from ITInfluence.twitter import TwitterREST",
"rest = TwitterREST()",
"def collectRequest(query):\n userRequestPerMinute = 1\n followersPerRequest = 5000\n \n val... |
'''
Created on 18 juin 2013
@author: Nils Amiet
'''
from ITInfluence.models import Tweet, User, Friendship
import networkx as nx
import math
class TwitterFollowersGraphBuilder():
    '''
    Builds the directed followers graph: an edge u -> v means u follows v.
    Also precomputes per-node drawing sizes (from follower counts) and
    out-degrees, exposed as self.nodeSizes / self.nodeFriendCounts.
    '''

    def __init__(self):
        self.graph = nx.DiGraph()
        self.nodeSizes = []
        self.nodeFriendCounts = []
        print("Building graph...")
        print("Running user query...")
        maxFollowersCount = 500
        minFollowersCount = 5000
        ### You can uncomment one of the following lines with "users = ..." at your choice
        # All users that have at most 500 followers
        users = User.objects.all().filter(followers_count__lte=maxFollowersCount).exclude(followers_count=0)
        # All users that have at least 5000 followers
        # users = User.objects.all().filter(followers_count__gte=minFollowersCount).exclude(followers_count=0)
        # All users than have at least one follower
        # users = User.objects.all().exclude(followers_count=0)
        # Don't run this one if you have less than 16GB memory on large networks (>10M)
        # users = User.objects.all()
        print("done!")
        self.buildGraph(users)
        print("...done building graph!")
        print("Processed %s users" % len(users))

    def buildGraph(self, users):
        '''Add one node per user and one edge per friendship, then compute node metadata.'''
        print("Counting users...")
        userCount = len(users)
        print("done!")
        counter = 0
        for user in users:
            # BUGFIX: use a dedicated name for the outer progress so the inner
            # loop's percentage does not clobber it in the log line below.
            userPercentage = 100*(float(counter)/userCount)
            print("Processing user %s/%s (%s" %(counter, userCount, userPercentage) + "%)")
            userId = int(user.id)
            self.graph.add_node(userId)
            # (Removed a dead getWeight() call here -- weights are only needed
            # per graph node and are computed in the node loop below.)
            friendships = Friendship.objects.all().filter(user__id=int(user.id))
            friendshipCount = friendships.count()
            friendshipCounter = 0
            for friendship in friendships:
                percentage = 100*(float(friendshipCounter)/friendshipCount)
                print("Processing friendship %s/%s (%s" %(friendshipCounter, friendshipCount, percentage) + "%)"+ " - User: %s" % userPercentage + "%")
                self.graph.add_edge(userId, int(friendship.followed_user.id))
                friendshipCounter += 1
            counter += 1
        nodeCount = len(self.graph.nodes())
        counter = 1
        for node in self.graph.nodes():
            percentage = 100*(float(counter)/nodeCount)
            print("Processing node %s/%s (%s" %(counter, nodeCount, percentage) + "%)")
            user = User.objects.get(id=node)
            userWeight = self.getWeight(int(user.followers_count))
            self.nodeSizes += [userWeight]
            userFriendsCount = self.graph.out_degree(node)
            self.nodeFriendCounts += [userFriendsCount]
            counter += 1

    def getWeight(self, followers):
        '''Map a follower count to a drawing size: 25 + 4**log10(followers).'''
        nodeSize = 25
        logBase = 10
        return nodeSize + 4 ** math.log(followers, logBase)
class TwitterGraphBuilder():
    '''
    Builds the directed replies graph for collected Twitter data.
    An edge u -> v means a reply from u reaches v.
    '''

    def __init__(self, hashtagOfInterest="summer"):
        '''Build the graph from all tweets containing ``hashtagOfInterest``.'''
        self.graph = nx.DiGraph()
        print("Building graph...")
        print("Running tweet query...")
        ### Uncomment one of the following lines "tweets = ..."
        # Use a not so frequent hashtag, fast computation
        tweets = Tweet.objects.all().filter(hashtags__icontains=hashtagOfInterest)
        # Don't run this if you have less than 32GB memory for large networks (>10M).
        # tweets = Tweet.objects.all()
        self.buildGraph(tweets)
        print("...done building graph!")

    def buildGraph(self, tweets):
        '''Add one edge per reply; a tweet that replies to nobody in particular
        is treated as a reply to every follower of its author.'''
        tweetCount = len(tweets)
        counter = 0
        for tweet in tweets:
            # Dedicated name so the follower loop's percentage can't clobber it.
            tweetPercentage = 100*(float(counter)/tweetCount)
            print("Processing tweet %s/%s (%s" %(counter, tweetCount, tweetPercentage) + "%)")
            author = tweet.user
            replyTo = int(tweet.in_reply_to_status_id)
            # BUGFIX: was ``replyTo is 0`` -- identity comparison on an int only
            # works through CPython's small-int cache; use a value comparison.
            if replyTo == 0:
                # it's a reply to all of the author's followers
                followers = self.getFollowers(author)
                followersCount = len(followers)
                followerCounter = 0
                for follower in followers:
                    followerPercentage = 100*(float(followerCounter)/followersCount)
                    print("Processing follower %s/%s (%s" %(followerCounter, followersCount, followerPercentage) + "%)" + " - Tweet: %s" % tweetPercentage + "%")
                    self.addReply(author, follower.user)
                    followerCounter += 1
            else:
                try:
                    # BUGFIX: was ``User.objects.get(replyTo)``; the positional
                    # argument raised TypeError, so every direct reply was
                    # silently dropped by the except below. The keyword lookup
                    # matches the sibling computeReplies() implementation.
                    toUser = User.objects.get(id=replyTo)
                    self.addReply(author, toUser)
                except:
                    # in_reply_to user may no longer exist
                    pass
            counter += 1

    def getFollowers(self, user):
        '''Return the Friendship rows whose followed_user is ``user``.'''
        friendships = Friendship.objects.all().filter(followed_user__id=int(user.id))
        return friendships

    def addReply(self, fromUser, toUser):
        '''Record one reply edge from ``fromUser`` to ``toUser``.'''
        self.graph.add_edge(int(fromUser.id), int(toUser.id))
[
8,
0,
0.0207,
0.0345,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.0483,
0.0069,
0,
0.66,
0.2,
316,
0,
3,
0,
0,
316,
0,
0
],
[
1,
0,
0.0552,
0.0069,
0,
0.66,
... | [
"'''\nCreated on 18 juin 2013\n\n@author: Nils Amiet\n'''",
"from ITInfluence.models import Tweet, User, Friendship",
"import networkx as nx",
"import math",
"class TwitterFollowersGraphBuilder():\n '''\n Builds the followers graph\n '''\n \n def __init__(self):\n self.graph = nx.DiGra... |
# coding: utf-8
'''
Created on 23 mai 2013
@author: Nils Amiet
'''
class PolarityCounter:
    '''Aggregates reply polarities per user and computes the network
    disagreement index (NDI) over the reply graph.

    ``replies`` is a list of dicts with keys "from", "to" and "polarity".
    '''

    # Class-level defaults kept for backward compatibility; every instance
    # rebinds its own copies in __init__.
    polarityCounts = {}
    RECEIVED = "recv"
    SENT = "sent"
    AVERAGE = "avg"
    NDI = -1  # network disagreement index

    def __init__(self, replies):
        '''
        Replies: a list of replies with the attributes "from", "to" and "polarity"
        '''
        # BUGFIX: polarityCounts used to be mutated as a *class* attribute, so
        # every PolarityCounter instance shared (and corrupted) the same dict.
        self.polarityCounts = {}
        self.NDI = -1
        self.replies = replies
        self.edges = []
        self.countPolarities()
        self.computeAveragePolarities()
        self.computeNDI()

    def countPolarities(self):
        '''Count, per user, how many positive/negative messages were sent and received.'''
        for reply in self.replies:
            fromUser = reply["from"]
            toUser = reply["to"]
            # negative polarity becomes 0 and positive becomes 1
            polarity = 0 if reply["polarity"] <= 0 else 1
            fromUserCounts = self._countsFor(fromUser)[self.SENT]
            toUserCounts = self._countsFor(toUser)[self.RECEIVED]
            fromUserCounts[polarity] = fromUserCounts.get(polarity, 0) + 1
            toUserCounts[polarity] = toUserCounts.get(polarity, 0) + 1

    def _countsFor(self, user):
        '''Return (creating if needed) the counter entry for ``user``.'''
        if user not in self.polarityCounts:
            self.polarityCounts[user] = {self.RECEIVED: {}, self.SENT: {}, self.AVERAGE: None}
        return self.polarityCounts[user]

    def computeAveragePolarities(self):
        '''Store |avg(received) - avg(sent)| under AVERAGE for each user.

        Users that never sent or never received anything keep AVERAGE = None.
        '''
        for user, userCounts in self.polarityCounts.items():
            receivedPositive = userCounts[self.RECEIVED].get(1, 0)
            receivedNegative = userCounts[self.RECEIVED].get(0, 0)
            sentPositive = userCounts[self.SENT].get(1, 0)
            sentNegative = userCounts[self.SENT].get(0, 0)
            # BUGFIX: recv/sent used to leak from the previous loop iteration
            # when a user had never received (or never sent) a message.
            recv = None
            sent = None
            receivedTotal = receivedPositive + receivedNegative
            if receivedTotal:
                recv = (receivedPositive - receivedNegative) / float(receivedTotal)
            sentTotal = sentPositive + sentNegative
            if sentTotal:
                sent = (sentPositive - sentNegative) / float(sentTotal)
            if recv is not None and sent is not None:
                userCounts[self.AVERAGE] = abs(recv - sent)

    def computeNDI(self):
        '''NDI = sum over undirected reply edges of (opinion(u) - opinion(v))^2,
        where opinion(u) = AVERAGE/2 (0 when unknown).'''
        self.computeEdges()
        sumNDI = 0
        for edge in self.edges:
            weight = 1
            firstUserOpinion = self._opinion(edge["from"])
            secondUserOpinion = self._opinion(edge["to"])
            sumNDI += weight * ((firstUserOpinion - secondUserOpinion) ** 2)
        self.NDI = sumNDI

    def _opinion(self, user):
        '''Half of the user's AVERAGE polarity difference; 0 when unavailable.'''
        try:
            average = self.polarityCounts[user][self.AVERAGE]
        except KeyError:
            return 0
        if average is None:
            return 0
        return average / float(2)

    def computeEdges(self):
        '''Deduplicate replies into undirected edges (one per user pair).'''
        for reply in self.replies:
            if not self.contains(reply, self.edges):
                self.edges += [reply]

    def contains(self, reply, edges):
        '''True when an edge between the same pair of users is already present.'''
        for edge in edges:
            if self.isSameMessage(reply, edge):
                return True
        return False

    def isSameMessage(self, reply, reply2):
        '''True when both replies connect the same two users (either direction).'''
        return reply["from"] == reply2["from"] and reply["to"] == reply2["to"] or reply["from"] == reply2["to"] and reply["to"] == reply2["from"]

    def polarityDifferenceRanking(self):
        '''Users sorted by AVERAGE polarity difference, largest first;
        users without an AVERAGE are excluded.'''
        users = {user: userCounts[self.AVERAGE] for user, userCounts in self.polarityCounts.items() if not userCounts[self.AVERAGE] == None}
        ranking = sorted(users.items(), key=lambda x: x[1], reverse=True)
        return ranking
[
8,
0,
0.0315,
0.0394,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
3,
0,
0.5315,
0.9449,
0,
0.66,
1,
151,
0,
8,
0,
0,
0,
0,
15
],
[
14,
1,
0.0787,
0.0079,
1,
0.13,
... | [
"'''\nCreated on 23 mai 2013\n\n@author: Nils Amiet\n'''",
"class PolarityCounter:\n \n polarityCounts = {}\n RECEIVED = \"recv\"\n SENT = \"sent\"\n AVERAGE = \"avg\"\n NDI = -1 # network disagreement index",
" polarityCounts = {}",
" RECEIVED = \"recv\"",
" SENT = \"sent\"",
"... |
'''
Created on 14 juin 2013
@author: Nils Amiet
'''
# coding: utf-8
import networkx as nx
import sqlite3
class InfrarougeGraphBuilder():
    '''Builds two graphs from the Infrarouge forum sqlite database:

    - graph1: directed reply graph, edge u -> v weighted by reply count
    - graph2: bipartite user (bipartite=0) / discussion (bipartite=1) graph
    '''

    def __init__(self, databasePath):
        # BUGFIX: these used to be mutable *class* attributes, so every
        # instance shared (and kept accumulating into) the same graphs.
        self.userRepliesCounts = {}
        self.graph1 = nx.DiGraph()
        self.userParticipations = []
        self.graph2 = nx.Graph()
        self.infrarougeDatabasePath = databasePath
        self.countUserReplies()
        self.createRepliesGraph()
        self.buildUserParticipations()
        self.createParticipationGraph()

    def countUserReplies(self):
        '''Count replies per (from_user, to_user) pair from the reply table.'''
        with sqlite3.connect(self.infrarougeDatabasePath) as connection:
            cursor = connection.cursor()
            query = "SELECT * FROM reply"
            cursor.execute(query)
            for reply in cursor:
                self.countReply(reply)
            cursor.close()

    def countReply(self, reply):
        '''Increment the counter for one (fkfrom, fkto) reply row.'''
        fromTo = (reply[0], reply[1])
        self.userRepliesCounts[fromTo] = self.userRepliesCounts.get(fromTo, 0) + 1

    def createRepliesGraph(self):
        '''Materialize the weighted directed reply graph.'''
        for fromTo, w in self.userRepliesCounts.items():
            try:
                self.graph1.add_edge(fromTo[0], fromTo[1], weight=w)
            except:
                print("Warning: adding edge failed")

    def buildUserParticipations(self):
        '''Collect (user, forum_thread) pairs from the threadmessage table.'''
        with sqlite3.connect(self.infrarougeDatabasePath) as connection:
            cursor = connection.cursor()
            query = "SELECT * FROM threadmessage"
            cursor.execute(query)
            for threadMessage in cursor:
                forumThread = threadMessage[2]
                user = threadMessage[3]
                self.userParticipations += [(user, forumThread)]
            cursor.close()

    def createParticipationGraph(self):
        '''Build the bipartite participation graph.'''
        users = [x[0] for x in self.userParticipations]
        discussions = [x[1] for x in self.userParticipations]
        self.graph2.add_nodes_from(users, bipartite=0)
        self.graph2.add_nodes_from(discussions, bipartite=1)
        self.graph2.add_edges_from(self.userParticipations)
[
8,
0,
0.038,
0.0633,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.1139,
0.0127,
0,
0.66,
0.3333,
691,
0,
1,
0,
0,
691,
0,
0
],
[
1,
0,
0.1266,
0.0127,
0,
0.66,... | [
"'''\nCreated on 14 juin 2013\n\n@author: Nils Amiet\n'''",
"import networkx as nx",
"import sqlite3",
"class InfrarougeGraphBuilder():\n \n userRepliesCounts = {}\n graph1 = nx.DiGraph()\n \n userParticipations = []\n graph2 = nx.Graph()",
" userRepliesCounts = {}",
" graph1 = nx.... |
'''
Created on 11 juin 2013
@author: Nils Amiet
'''
from ITInfluence.polarity import PolarityCounter
from ITInfluence.models import Tweet, User, Friendship
from ITInfluence.hashtags import getAllHashtags
from InfrarougeTwitterInfluence import settings
import sqlite3
import math
infrarougeDatabasePath = settings.DATABASES["infrarouge"]["NAME"]
def getUsers():
    '''Return all rows of the Infrarouge ``user`` table as raw tuples.'''
    # BUGFIX(readability): the accumulator used to shadow the function's own name.
    users = []
    with sqlite3.connect(infrarougeDatabasePath) as connection:
        cursor = connection.cursor()
        query = "SELECT * FROM user"
        cursor.execute(query)
        for user in cursor:
            users += [user]
        cursor.close()
    return users
def getForums():
    '''Return all forums as dicts with "id", "title" and "description" keys.'''
    forums = []
    with sqlite3.connect(infrarougeDatabasePath) as connection:
        cursor = connection.cursor()
        query = """
        SELECT *
        FROM forum
        """
        cursor.execute(query)
        for forum in cursor:
            builtForum = {
                "id": forum[0],
                "title": forum[1],
                "description": forum[2],
            }
            forums += [builtForum]
        # Consistency fix: close the cursor like the sibling query helpers do.
        cursor.close()
    return forums
def polarityReplies(forum=None):
    '''Returns a list of replies containing the source, destination, polarity, timestamp and forum id'''
    replies = []
    with sqlite3.connect(infrarougeDatabasePath) as connection:
        cursor = connection.cursor()
        query = """
        SELECT r.fkfrom, r.fkto, m.polarity, m.timestamp, f.id
        FROM reply as r
        INNER JOIN threadmessage as m
        ON r.fkthreadmessage=m.id
        INNER JOIN forumthread as t
        ON m.fkforumthread=t.id
        INNER JOIN forum as f
        ON t.fkparentforum=f.id
        """
        params = ()
        if forum is not None:
            # Parameterized instead of string concatenation -- avoids SQL
            # injection and handles non-int input safely.
            query += "WHERE f.id=?"
            params = (forum,)
        cursor.execute(query, params)
        for reply in cursor:
            builtReply = {
                "from": reply[0],
                "to": reply[1],
                "polarity": reply[2],
                "timestamp": reply[3],
                "forum": reply[4]
            }
            replies += [builtReply]
        cursor.close()
    return replies
def polarizationForAllForums(equalTime=True, equalRepliesCount=True):
    '''Compute NDI-over-time series for every forum.

    Returns two lists of ("Forum <id>", series) tuples: one sliced by equal
    time intervals, one sliced by equal reply counts.
    '''
    resultsEqualTime = []
    resultsEqualRepliesCount = []
    sliceCount = 20
    for forum in getForums():
        forumID = forum["id"]
        title = "Forum " + str(forumID)
        timeSortedReplies = sorted(polarityReplies(forum=forumID),
                                   key=lambda reply: reply["timestamp"])
        # equal time interval
        if equalTime:
            series = computePolarizationOverTime(timeSortedReplies, forumID, sliceCount)
            resultsEqualTime += [(title, series)]
        # equal replies count interval
        if equalRepliesCount:
            series = computePolarizationOverTimeSamePostCount(timeSortedReplies, forumID, sliceCount)
            resultsEqualRepliesCount += [(title, series)]
    return resultsEqualTime, resultsEqualRepliesCount
def computePolarizationOverTimeSamePostCount(replies, forumID, sliceCount):
    '''NDI series over ~``sliceCount`` windows holding equal numbers of replies.

    Returns (times, ndis) where ``times`` is simply the window index.
    Adjacent windows deliberately overlap by one reply (upper bound +1).
    '''
    repliesCount = len(replies)
    # BUGFIX: the original compared a float to an int with ``is not`` (which is
    # always True for distinct objects), so the window size was unconditionally
    # incremented. ceil() on true division gives the intended "round up when
    # the division is not exact" size.
    deltaReplies = int(math.ceil(repliesCount / float(sliceCount)))
    ndiVariation = []
    replyOffset = 0
    while replyOffset < repliesCount:
        upperOffset = replyOffset + deltaReplies + 1
        r = replies[replyOffset:upperOffset]
        polarityCounter = PolarityCounter(r)
        edgesCount = len(polarityCounter.edges)
        ndiVariation += [(int(replyOffset), polarityCounter.NDI, edgesCount)]
        replyOffset += deltaReplies
    times = list(range(len(ndiVariation)))
    ndis = [x[1] for x in ndiVariation]
    return times, ndis
def computePolarizationOverTime(replies, forumID, sliceCount):
    '''NDI series over ``sliceCount`` equal time intervals.

    Returns (times, ndis); ``times`` is the interval index.
    '''
    first = replies[0]["timestamp"]
    last = replies[-1]["timestamp"]
    deltaTimeMillis = (last - first) / sliceCount
    ndiVariation = []
    timeThreshold = first + deltaTimeMillis
    while timeThreshold <= last:
        windowReplies = repliesInTimeInterval(replies, timeThreshold - deltaTimeMillis, timeThreshold)
        counter = PolarityCounter(windowReplies)
        ndiVariation += [(int(timeThreshold), counter.NDI)]
        timeThreshold += deltaTimeMillis
    times = list(range(len(ndiVariation)))
    ndis = [pair[1] for pair in ndiVariation]
    return times, ndis
def repliesInTimeInterval(replies, lowerBound, upperBound):
    '''Replies whose timestamp lies in [lowerBound, upperBound] (inclusive).'''
    return [reply for reply in replies if lowerBound <= reply["timestamp"] <= upperBound]
'''Twitter'''
def getNDIForMostFrequentHashtags(equalTime=True, equalRepliesCount=True):
    '''Compute NDI-over-time series for the most frequent hashtags.

    Returns two lists of ("#tag", series) tuples (equal-time and
    equal-reply-count slicing).
    '''
    hashtags = mostFrequentHashtags()
    print("Most frequent hashtags:")
    print(hashtags)
    equalTimeSeries = []
    equalCountSeries = []
    sliceCount = 20
    # Adjust this value as you wish
    # Greater value = More accurate results but more computation time
    maxFollowersCount = 50
    for tag in hashtags:
        byTime, byCount = getNDIForHashtag(tag, sliceCount=sliceCount,
                                           maxFollowersCount=maxFollowersCount,
                                           equalTime=equalTime,
                                           equalRepliesCount=equalRepliesCount)
        equalTimeSeries += byTime
        equalCountSeries += byCount
    return equalTimeSeries, equalCountSeries
def getNDIForHashtag(hashtag, sliceCount=20, maxFollowersCount=100, equalTime=True, equalRepliesCount=True):
    '''NDI series for one hashtag.

    Returns ([("#tag", series)], [("#tag", series)]) for equal-time and
    equal-reply-count slicing respectively (empty lists when disabled).
    '''
    byTime = []
    byCount = []
    print("Computing NDIs for hashtag %s..." % hashtag)
    ### Uncomment one of the following lines "tweets = ..."
    # Users with at most x followers, fast computation
    tweets = Tweet.objects.all().filter(user__followers_count__lte=maxFollowersCount).filter(hashtags__icontains=hashtag)
    # Complete dataset: Requires a lot of memory and a fast CPU for large networks
    # tweets = Tweet.objects.all().filter(hashtags__icontains=hashtag)
    timeSortedTweets = sorted(tweets, key=lambda tweet: tweet.created_at)
    label = "#" + hashtag
    if equalTime:
        byTime += [(label, getNDIValuesForHashtag(hashtag, sliceCount, timeSortedTweets))]
    if equalRepliesCount:
        byCount += [(label, getNDIValuesEqualRepliesCountForHashtag(hashtag, sliceCount, timeSortedTweets))]
    return byTime, byCount
def mostFrequentHashtags():
    '''The 6 most frequent hashtags, lowercased, deduplicated, most frequent first.'''
    hashtagCount = 6  # n most frequent
    byFrequency = sorted(getAllHashtags(), key=lambda pair: pair[1], reverse=True)
    lowered = [tag.lower() for tag, unused in byFrequency]
    return convertListToSetKeepingOrder(lowered)[:hashtagCount]
def convertListToSetKeepingOrder(xs):
    '''Remove duplicates from ``xs`` while keeping first-occurrence order.'''
    seen = set()
    unique = []
    for x in xs:
        if x not in seen:
            seen.add(x)
            unique.append(x)
    return unique
def computeReplies(tweets):
    '''Expand tweets into reply dicts with "from", "to" and "polarity" keys.

    A direct reply targets its recipient; a non-reply tweet (or a reply to a
    user we do not know) is expanded into one reply per follower of the author.
    '''
    replies = []
    for tweet in tweets:
        authorId = int(tweet.user.id)
        polarity = float(tweet.polarity)
        replyTo = int(tweet.in_reply_to_status_id)
        # BUGFIX: was ``replyTo is not 0`` -- identity comparison on ints only
        # works through CPython's small-int cache; use a value comparison.
        if replyTo != 0: # reply to single person
            try:
                user = User.objects.get(id=replyTo)
                userId = int(user.id)
                builtReply = {
                    "from": authorId,
                    "to": userId,
                    "polarity": polarity,
                }
                replies += [builtReply]
            except:
                # reply to all his/her followers
                replies += getFollowerReplies(authorId, polarity)
        else:
            # reply to all his/her followers
            replies += getFollowerReplies(authorId, polarity)
    return replies
def getFollowerReplies(authorId, polarity):
    '''One reply dict per follower of ``authorId``, all with the same polarity.'''
    friendships = Friendship.objects.all().filter(followed_user__id=authorId)
    return [
        {"from": authorId, "to": int(friendship.user.id), "polarity": polarity}
        for friendship in friendships
    ]
def getNDIValuesEqualRepliesCountForHashtag(hashtag, sliceCount, timeSortedTweets):
    '''NDI series over ~``sliceCount`` windows holding equal numbers of tweets.

    Returns (times, ndis); ``times`` is the window index. Needs at least two
    tweets, otherwise both lists are empty. Adjacent windows overlap by one
    tweet (upper bound +1).
    '''
    ndiVariation = []
    if len(timeSortedTweets) >= 2:
        tweetCount = len(timeSortedTweets)
        # BUGFIX: a float was compared to an int with ``is not`` (always True),
        # unconditionally growing the window by one. ceil() on true division is
        # the intended "round up when not exact" size.
        deltaTweets = int(math.ceil(tweetCount / float(sliceCount)))
        tweetOffset = 0
        sliceIndex = 1
        while tweetOffset < tweetCount:
            print("Computing slice %s/%s" %(sliceIndex, sliceCount))
            upperOffset = tweetOffset + deltaTweets + 1
            tweetsSubset = timeSortedTweets[tweetOffset:upperOffset]
            replies = computeReplies(tweetsSubset)
            polarityCounter = PolarityCounter(replies)
            ndiVariation += [(int(tweetOffset), polarityCounter.NDI)]
            tweetOffset += deltaTweets
            sliceIndex += 1
    times = list(range(len(ndiVariation)))
    ndis = [x[1] for x in ndiVariation]
    return times, ndis
def getNDIValuesForHashtag(hashtag, sliceCount, timeSortedTweets):
    # NDI series over ``sliceCount`` equal time intervals.
    # Returns (times, ndis); ``times`` ends up as the interval index.
    # Needs at least two tweets, otherwise both lists are empty.
    ndiVariation = []
    if len(timeSortedTweets) >= 2:
        first = timeSortedTweets[0].created_at
        last = timeSortedTweets[-1].created_at
        interval = last - first
        deltaTimeMillis = interval / sliceCount
        sliceIndex = 1
        timeUpperBound = first + deltaTimeMillis
        while timeUpperBound <= last:
            print("Computing slice %s/%s" %(sliceIndex, sliceCount))
            tweetsSubset = tweetsInInterval(timeSortedTweets, timeUpperBound - deltaTimeMillis, timeUpperBound)
            replies = computeReplies(tweetsSubset)
            polarityCounter = PolarityCounter(replies)
            ndiVariation += [(timeUpperBound, polarityCounter.NDI)]
            timeUpperBound += deltaTimeMillis
            # Guard against an infinite loop when the delta is zero (all tweets
            # share one timestamp): adding it would never move the bound.
            if timeUpperBound == timeUpperBound + deltaTimeMillis:
                break
            sliceIndex += 1
    times = [x[0] for x in ndiVariation]
    times = [i for i, unused in enumerate(times)]
    ndis = [x[1] for x in ndiVariation]
    return times, ndis
def tweetsInInterval(tweets, timeLowerBound, timeUpperBound):
    '''Tweets whose created_at lies in [timeLowerBound, timeUpperBound] (inclusive).'''
    return [tweet for tweet in tweets if timeLowerBound <= tweet.created_at <= timeUpperBound]
[
8,
0,
0.0089,
0.0148,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.0207,
0.003,
0,
0.66,
0.0417,
330,
0,
1,
0,
0,
330,
0,
0
],
[
1,
0,
0.0237,
0.003,
0,
0.66,
... | [
"'''\nCreated on 11 juin 2013\n\n@author: Nils Amiet\n'''",
"from ITInfluence.polarity import PolarityCounter",
"from ITInfluence.models import Tweet, User, Friendship",
"from ITInfluence.hashtags import getAllHashtags",
"from InfrarougeTwitterInfluence import settings",
"import sqlite3",
"import math",... |
'''
Created on 7 juin 2013
@author: Nils Amiet
'''
import threading
import time
import math
from twython import TwythonStreamer, Twython, TwythonRateLimitError
from twython.exceptions import TwythonError
from requests.exceptions import ConnectionError
from django.db import DatabaseError
from ITInfluence.models import Tweet, User
from ITInfluence.language import isEnglishTweet
from ITInfluence.hashtags import getSimilarHashtags
###
# Configuration starts here
###
'''
These values are provided for your personal use but since the code is distributed to multiple people,
and there can be only one machine using this account at the same time,
please use the values from your own Twitter account.
Register a regular Twitter account and then register an app at http://dev.twitter.com/apps
'''
APP_KEY = "pjoZEliZOQNl9D4tLGljA"
APP_SECRET = "1Wp2Jd2eiKVWYH8nJFm3wg6C3bnf10k1D25uzajloU"
OAUTH_TOKEN = "1479001058-kJibhcFtcHvUKvbFnKgUO8zPlTpb2MC6HCr1Z1z"
OAUTH_TOKEN_SECRET = "KmhIChKm9nENIXt5P5xfotOgxlaI9JfDBy3eZ4ZVKDc"
'''
Enables or disables the stream monitor.
The stream monitor periodically checks whether the incoming tweet rate is too low.
In such a case it restarts the stream with additional similar hashtags.
'''
isStreamMonitorEnabled = True
'''
Number of seconds between each stream monitor check.
'''
monitorCheckInterval = 300 # seconds
'''
Minimum number of incoming tweets per minute required.
Used by the stream monitor to decide whether it should restart with similar hashtags.
'''
incomingTweetRateThreshold = 0.5 # minimum tweets per minute
'''
Number of seconds after which the stream is stopped if not a single tweet was received.
REMARK: You should not set streamTimeout to a value that is less than monitorCheckInterval because
it may cause having two or more streams opened at the same time, which is forbidden
by the Twitter API.
'''
streamTimeout = 180 # seconds
###
# Configuration ends here
###
streamingHashtag = ""
class TwitterREST():
    '''Class to handle operations that require a call to the Twitter REST API'''

    TIME_INTERVAL_IN_SECONDS = (15 * 60) + 2  # one rate-limit window; add 2 seconds just to be sure
    MAX_FOLLOWERS_PER_REQUEST = 5000  # Twitter returns at most 5000 follower ids per call
    MAX_RETRY_COUNT = 30
    RETRY_INTERVAL_IN_SECONDS = 30
    isCollectingFollowers = False  # polled by sleepFor() so a long collection can be cancelled

    def __init__(self):
        self.twitter = Twython(APP_KEY, APP_SECRET, OAUTH_TOKEN, OAUTH_TOKEN_SECRET)

    def stopCollecting(self):
        '''Request cancellation of a running getFollowers() call.'''
        self.isCollectingFollowers = False

    def sleepFor(self, seconds):
        '''Sleep for *seconds*, waking every second to honour stopCollecting().

        Returns False when collection was cancelled during the wait, True otherwise.
        '''
        startTime = time.time()
        for unused in range(seconds):
            time.sleep(1)
            if not self.isCollectingFollowers:
                return False
        # make sure we slept for long enough
        while True:
            currentTime = time.time()
            if currentTime - startTime < seconds:
                time.sleep(1)
            else:
                break
        return True

    def getFollowers(self, userId, getAll=False, retry=0):
        '''Return the list of follower ids of *userId*, handling rate limits.

        getAll: when True, follow pagination cursors past the first 5000 ids.
        retry: internal recursion counter for transient-error retries.
        Returns [] for deleted (404) or private (401) accounts, and None when
        cancelled or when MAX_RETRY_COUNT is exceeded.
        '''
        self.isCollectingFollowers = True
        try:
            totalFollowersCount = int(User.objects.get(id=userId).followers_count)
        except:
            # user unknown in our DB: the progress display just shows 0 parts
            totalFollowersCount = 0
        requiredCallsCount = int(math.ceil(totalFollowersCount/float(self.MAX_FOLLOWERS_PER_REQUEST)))
        partCount = 1
        # define rate limited operation
        def doGetFollowers(**kwargs):
            try:
                return self.twitter.get_followers_ids(user_id=userId, **kwargs)
            except ConnectionError as e:
                print(e)
                print("ConnectionError! Retrying...")
                self.sleepFor(10)
                return doGetFollowers(**kwargs)
        try:
            self.displayFollowersCollectionStatus(userId, partCount, requiredCallsCount)
            returnValue = doGetFollowers()
        except TwythonRateLimitError:
            # wait a complete Twitter time interval
            print("Sleeping until limit is lifted...")
            if not self.sleepFor(self.TIME_INTERVAL_IN_SECONDS):
                return  # stop collecting
            # try again
            print("Retrying...")
            self.displayFollowersCollectionStatus(userId, partCount, requiredCallsCount)
            returnValue = doGetFollowers()
        except TwythonError as e:
            # User does not exist
            # User's account was probably deleted after we collected his tweets
            if "404" in str(e):
                print("WARNING: User %s does not exist!" % userId)
                return []
            if "401" in str(e):
                print("WARNING: User %s is probably private! Can't collect his/her followers." % userId)
                return []
            # Twitter is down / API changed / unknown error
            # wait a few seconds and retry
            print(e)
            print("Retrying in %s seconds..." % self.RETRY_INTERVAL_IN_SECONDS)
            if not self.sleepFor(self.RETRY_INTERVAL_IN_SECONDS):
                return
            if retry > self.MAX_RETRY_COUNT:
                print("Max retry count reached. Aborting...")
                return
            print("Retrying... attempt %s/%s" % (retry, self.MAX_RETRY_COUNT))
            return self.getFollowers(userId, getAll=getAll, retry=retry + 1)
        partCount += 1
        followers = returnValue["ids"]
        if getAll:
            # FIX: compare the cursor with '!=' instead of 'is not' -- identity
            # tests on integers are undefined outside CPython's small-int cache.
            while returnValue["next_cursor"] != 0:
                try:
                    self.displayFollowersCollectionStatus(userId, partCount, requiredCallsCount)
                    returnValue = doGetFollowers(cursor=returnValue["next_cursor"])
                except TwythonRateLimitError:
                    # wait a complete Twitter time interval
                    print("Sleeping until limit is lifted...")
                    if not self.sleepFor(self.TIME_INTERVAL_IN_SECONDS):
                        return  # stop collecting
                    # try again
                    print("Retrying...")
                    self.displayFollowersCollectionStatus(userId, partCount, requiredCallsCount)
                    returnValue = doGetFollowers(cursor=returnValue["next_cursor"])
                except TwythonError as e:
                    # Twitter is down / API changed / unknown error
                    # wait a few seconds and retry
                    print(e)
                    print("Retrying in %s seconds..." % self.RETRY_INTERVAL_IN_SECONDS)
                    if not self.sleepFor(self.RETRY_INTERVAL_IN_SECONDS):
                        return
                    if retry > self.MAX_RETRY_COUNT:
                        print("Max retry count reached. Aborting...")
                        return
                    print("Retrying... attempt %s/%s" % (retry, self.MAX_RETRY_COUNT))
                    return self.getFollowers(userId, getAll=getAll, retry=retry + 1)
                partCount += 1
                followers += returnValue["ids"]
        elif returnValue["next_cursor"] != 0:
            print("Warning: you specified getAll=False but there was more than %s followers. Results are truncated!" % self.MAX_FOLLOWERS_PER_REQUEST)
        return followers

    def displayFollowersCollectionStatus(self, userId, partCount, requiredCallsCount):
        '''Print progress of the paged follower collection.'''
        print("Getting followers for user %s (part %s/%s)..." % (userId, partCount, requiredCallsCount))

    def getUser(self, userId):
        '''Fetch a user's profile from Twitter and return an unsaved User model.'''
        # define rate limited operation
        def doGetUser():
            return self.twitter.show_user(user_id=userId)
        try:
            print("Getting details for user %s..." % userId)
            returnValue = doGetUser()
        except TwythonRateLimitError:
            # wait a complete Twitter time interval
            print("Sleeping until limit is lifted...")
            self.sleepFor(self.TIME_INTERVAL_IN_SECONDS)
            # try again
            print("Retrying...")
            print("Getting details for user %s..." % userId)
            returnValue = doGetUser()
        print("done.")
        screen_name = returnValue["screen_name"]
        statuses_count = returnValue["statuses_count"]
        friends_count = returnValue["friends_count"]
        followers_count = returnValue["followers_count"]
        lang = returnValue["lang"]
        followersReady = True  # don't collect followers of followers
        user = User(id=userId, screen_name=screen_name, statuses_count=statuses_count, friends_count=friends_count, followers_count=followers_count, lang=lang, followers_ready=followersReady)
        return user
class TwitterStreaming(TwythonStreamer):
    '''Class to handle operations that deal with the Twitter Streaming API'''

    def setParams(self, stopTweetCount, stopTime):
        '''Reset counters. stopTweetCount (tweets) and stopTime (minutes) are
        stop conditions; 0 means "no limit".'''
        self.stopTweetCount = stopTweetCount
        self.stopTime = stopTime
        self.receivedTweetsCount = 0
        self.startTime = time.time()
        self.rateTweetCount = 0
        self.rateLastTime = time.time()

    def getRunningTime(self):
        '''Minutes elapsed since setParams() was called.'''
        currentTime = time.time()
        elapsedTime = currentTime - self.startTime  # seconds
        elapsedTime /= 60.0  # convert to minutes
        return elapsedTime

    def getRate(self):
        '''Received tweets per minute since the previous getRate() call.'''
        receivedTweets = self.rateTweetCount
        timeInterval = (time.time() - self.rateLastTime) / 60.0  # minutes
        rate = receivedTweets / timeInterval
        self.rateLastTime = time.time()
        self.rateTweetCount = 0
        return rate

    def on_success(self, data):
        '''Persist one incoming status (author + tweet), then check stop conditions.'''
        # Language filter: drop statuses that do not look English.
        try:
            text = data["text"]
            if not isEnglishTweet(text):
                print("Dropped tweet (considered non-english): " + text)
                return
            else:
                print("ENGLISH: " + text)
        except:
            # no "text" key (e.g. delete notices): fall through, the KeyError
            # handler below skips such payloads
            pass
        try:
            # User
            author = data["user"]
            userId = author["id"]
            screen_name = author["screen_name"]
            statuses_count = author["statuses_count"]
            friends_count = author["friends_count"]
            followers_count = author["followers_count"]
            lang = author["lang"]
            followersReady = False  # followers are fetched later by the REST collector
            userReady = True
            user = User(id=userId, screen_name=screen_name, statuses_count=statuses_count, friends_count=friends_count, followers_count=followers_count, lang=lang, followers_ready=followersReady, user_ready=userReady)
            if not User.objects.filter(id=userId).exists():
                try:
                    user.save()
                except:
                    print("ERROR: failed saving user")
            # Tweet
            tweetId = data["id_str"]
            in_reply_to_status_id = data["in_reply_to_status_id"]
            if in_reply_to_status_id is None:
                in_reply_to_status_id = 0
            hashtags = ""
            try:
                ht = data["entities"]["hashtags"]
                tags = [x["text"] for x in ht]
                hashtags = " ".join(tags)
            except:
                # this tweet doesn't contain any hashtag
                pass
            text = data["text"]
            created_at = data["created_at"]
            # Twitter's "Mon Jun 03 ..." timestamp -> ISO-like string with UTC offset
            timestamp = time.strftime('%Y-%m-%d %H:%M:%S+00:00', time.strptime(created_at, '%a %b %d %H:%M:%S +0000 %Y'))
            polarity = 0
            polarityReady = False  # SentiStrength classification happens later
            tweet = Tweet(id=tweetId, user=user, in_reply_to_status_id=in_reply_to_status_id, text=text, created_at=timestamp, polarity=polarity, polarity_ready=polarityReady, hashtags=hashtags)
            tweet.save()
            # update received tweets count
            self.receivedTweetsCount += 1
            self.rateTweetCount += 1
            # check stop conditions (0 means "no limit")
            # FIX: the original tested receivedTweetsCount against 0 instead of
            # stopTweetCount, so an unlimited stream (stopTweetCount=0) stopped
            # after its first tweet; also use '!=' rather than 'is not' on ints.
            if (self.stopTweetCount != 0 and self.receivedTweetsCount >= self.stopTweetCount) or (self.stopTime != 0 and self.getRunningTime() >= self.stopTime):
                self.disconnect()
        except KeyError:
            '''Some tweets are badly sent and are missing user or text'''
            pass
        except DatabaseError:
            '''Bad response (invalid JSON)'''
            pass
        except Exception as e:
            print(e)
            pass

    def on_error(self, status_code, data):
        '''Log stream errors reported by Twitter.'''
        print(status_code, data)
'''FUNCTIONS'''
def readStream(filterHashtag, stopTweetCount, stopTime, stream):
    '''Blocking worker: consume the Twitter statuses/filter stream.

    filterHashtag: comma-separated hashtags passed to the "track" parameter.
    stopTweetCount / stopTime: stop conditions forwarded to the stream object.
    On any error (including the configured read timeout) the stream is
    disconnected and restarted with additional similar hashtags.
    '''
    stream.setParams(stopTweetCount, stopTime)
    try:
        stream.statuses.filter(track=filterHashtag)
        print("Stopped streaming")
    except Exception as e:
        print(e)
        stream.disconnect()
        print("Stopped streaming (timeout)")
        updateHashtagsUsingSimilar(stream) # retry with similar hashtags
def launchStreamMonitor():
    '''Start the daemon thread that watches the stream's incoming tweet rate.'''
    global stream
    monitor = threading.Thread(target=monitorStream, args=(stream,))
    monitor.daemon = True
    print("Launched stream monitor...")
    monitor.start()
def monitorStream(stream):
    '''Watchdog loop: every monitorCheckInterval seconds, check the stream's
    incoming tweet rate and restart it with similar hashtags when too low.'''
    # monitor agent that checks incoming tweet rate
    # relaunch stream with similar hashtags if rate is too low
    while True:
        try:
            if stream.connected:
                print("Stream is connected.")
                tweetsPerMinute = stream.getRate()
                print("Current rate: %s tweets/min" % tweetsPerMinute)
                # Only restart once the stream has been up at least streamTimeout
                # seconds, so a freshly started stream gets a chance to warm up.
                streamStartInterval = time.time() - stream.startTime
                if tweetsPerMinute < incomingTweetRateThreshold and streamStartInterval >= streamTimeout:
                    print("Not receiving enough tweets matching this set of hashtags.")
                    print("Retrying with additional similar hashtags...")
                    updateHashtagsUsingSimilar(stream)
            else:
                print("Stream is disconnected.")
        except Exception as e:
            print(e)
            print("WARNING: stream not instantiated. Stopping monitor...")
            break
        # sleep until next check
        time.sleep(monitorCheckInterval)
def updateHashtagsUsingSimilar(stream):
    '''Restart the stream, tracking the current hashtags plus similar ones.

    For every tag currently tracked, the 2 most similar tags (by co-occurrence
    score from getSimilarHashtags) are added, duplicates removed, and the
    stream is stopped and relaunched with the enlarged set. The remaining
    tweet budget (stopTweetCount minus tweets already received) carries over.
    '''
    currentTags = [tag.replace("#", "") for tag in streamingHashtag.split(",")]
    similarTags = []
    for tag in currentTags:
        similar = getSimilarHashtags(tag)
        # getSimilarHashtags returns (tag, score) pairs; keep the 2 best.
        similar = sorted(similar, key=lambda x: x[1], reverse=True)
        similarCount = 2
        similar = similar[:similarCount] # N most similar tags
        similar = [candidate for candidate, unused in similar]
        similarTags += similar
    similarTags += currentTags
    similarTags = set(similarTags) # remove duplicates
    # convert to comma separated string with a leading # for each tag
    newFilterTags = ",".join(["#" + tag for tag in similarTags])
    print("Current tags: %s" % streamingHashtag)
    print("New tags: %s" % newFilterTags)
    # stop stream
    stream.disconnect()
    # restart with new tags, carrying over the remaining tweet budget
    stopTweetCount = stream.stopTweetCount - stream.receivedTweetsCount
    stopTime = stream.stopTime
    # (removed the original 'del stream': it only unbound the local parameter
    # name and had no effect on the module-level stream object)
    startStreaming(newFilterTags, stopTweetCount, stopTime)
def startStreaming(filterHashtag="twitter", stopTweetCount=0, stopTime=0):
    '''Begin collecting tweets matching filterHashtag on a daemon thread.

    stopTweetCount / stopTime are forwarded to the stream; 0 means no limit.
    '''
    global streamingHashtag
    streamingHashtag = filterHashtag
    global stream
    worker = threading.Thread(target=readStream,
                              args=(filterHashtag, stopTweetCount, stopTime, stream))
    worker.daemon = True
    print("started reading stream...")
    worker.start()
def toggleStreaming():
    '''Connect the stream when idle, otherwise shut it down.'''
    global stream
    if stream.connected:
        print("disconnecting stream...")
        stream.disconnect()
    else:
        startStreaming()
# instantiate stream
# Single module-level stream shared by all callers: the Twitter API forbids
# more than one open stream per account at a time.
stream = TwitterStreaming(APP_KEY, APP_SECRET, OAUTH_TOKEN, OAUTH_TOKEN_SECRET, timeout=streamTimeout)
# monitor stream
if isStreamMonitorEnabled:
    launchStreamMonitor()
| [
[
8,
0,
0.0069,
0.0115,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.0161,
0.0023,
0,
0.66,
0.0286,
83,
0,
1,
0,
0,
83,
0,
0
],
[
1,
0,
0.0184,
0.0023,
0,
0.66,
... | [
"'''\nCreated on 7 juin 2013\n\n@author: Nils Amiet\n'''",
"import threading",
"import time",
"import math",
"from twython import TwythonStreamer, Twython, TwythonRateLimitError",
"from twython.exceptions import TwythonError",
"from requests.exceptions import ConnectionError",
"from django.db import D... |
# Create your views here.
import math
import networkx as nx
from django.shortcuts import render_to_response, redirect
from django.template import RequestContext
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.db import connection
from ITInfluence.models import Tweet, User
from ITInfluence.models import InfrarougeUser, InfrarougeForum, InfrarougeForumThread, InfrarougeThreadMessage
from ITInfluence import twitter, followers, NDI
from ITInfluence.plot import GraphPlotter, TwoDimensionalValuesPlotter
from ITInfluence.hashtags import getAllHashtags, getSimilarHashtags
from ITInfluence.infrarouge import InfrarougeGraphBuilder
from ITInfluence.TwitterGraphBuilder import TwitterGraphBuilder, TwitterFollowersGraphBuilder
from Tools.Timer import timer
from SentiStrength import sentistrength
from InfrarougeTwitterInfluence.settings import DATABASES
'''VIEWS'''
def index(request):
    '''Site root: forward visitors to the Infrarouge dashboard.'''
    dashboard = "/infrarouge/"
    return redirect(dashboard)
def twitterStats(request):
    '''Twitter dashboard view: dataset sizes plus links to the chart views.'''
    # URL paths of the PNG-producing views rendered into the template.
    NDIPath = "/twitter/images/ndi/"
    NDITimePath = "/twitter/images/ndi-time/"
    NDIRepliesCountPath = "/twitter/images/ndi-replies-count/"
    repliesGraphVisualizationPath = "/twitter/images/replies-graph/"
    followersGraphVisualizationPath = "/twitter/images/followers-graph/"
    tweetCount = Tweet.objects.count()
    ### Use one of the two methods (slow but exact count or fast estimation)
    ## METHOD 1
    # Uncomment for exact count (WARNING: slow for large tables)
    # userCount = User.objects.count()
    # friendshipCount = Friendship.objects.count()
    ## METHOD 2
    # Uncomment for fast count using approximation
    # NOTE(review): information_schema.table_rows is a MySQL estimate, not an
    # exact row count -- acceptable here since it only feeds a dashboard.
    databaseName = DATABASES["default"]["NAME"]
    userTableName = "ITInfluence_user"
    friendshipTableName = "ITInfluence_friendship"
    query = """
    SELECT table_name, table_rows
    FROM information_schema.tables
    WHERE table_schema = %s
    """
    cursor = connection.cursor()
    cursor.execute(query, [databaseName])
    response = cursor.fetchall()
    userCount = 0
    friendshipCount = 0
    for row in response:
        # Case-insensitive match: table-name casing depends on the MySQL host OS.
        if row[0].lower() == userTableName.lower():
            userCount = row[1]
        if row[0].lower() == friendshipTableName.lower():
            friendshipCount = row[1]
    cursor.close()
    ### END
    context = {
        "tweetCount": tweetCount,
        "userCount": userCount,
        "friendshipCount": friendshipCount,
        'NDIPath': NDIPath,
        'NDITimePath': NDITimePath,
        'NDIRepliesCountPath': NDIRepliesCountPath,
        "repliesGraphVisualizationPath": repliesGraphVisualizationPath,
        "followersGraphVisualizationPath": followersGraphVisualizationPath,
    }
    return render_to_response("ITInfluence/twitter.html", context, context_instance=RequestContext(request))
def twitterBrowseHashtags(request):
    '''Render the hashtag cloud, hiding tags seen fewer than 10 times.'''
    minimumOccurences = 10
    cloud = []
    for tag, occurences in getAllHashtags():
        if occurences >= minimumOccurences:
            cloud.append((tag, occurences, fontSize(occurences)))
    context = {
        "hashtags": cloud,
        "minimumOccurences": minimumOccurences,
    }
    return render_to_response("ITInfluence/twitter-browse-hashtags.html", context, context_instance=RequestContext(request))
def twitterHashtag(request, hashtag=""):
    '''Detail page for one hashtag with its similar tags rendered as a cloud.'''
    similar = []
    for tag, occurences in getSimilarHashtags(hashtag):
        similar.append((tag, occurences, fontSize(occurences)))
    context = {
        "hashtag": hashtag,
        "similarHashtags": similar,
    }
    return render_to_response("ITInfluence/hashtag.html", context, context_instance=RequestContext(request))
def twitterBrowseTweets(request):
    '''Paginated tweet list, newest first; classifies pending tweets on entry.

    GET parameters: tpp (tweets per page, default 20 when missing, invalid or
    non-positive) and page (page number, clamped to the valid range).
    '''
    classifyTweets() # using sentistrength
    try:
        tweetsPerPage = int(request.GET.get("tpp"))
    except (TypeError, ValueError):
        # "tpp" missing or not an integer
        tweetsPerPage = 20
    if tweetsPerPage <= 0:
        # non-positive page sizes fall back to the default (same behaviour as
        # before, without raising an exception as control flow)
        tweetsPerPage = 20
    tweets = Tweet.objects.all().order_by("-created_at")
    paginator = Paginator(tweets, tweetsPerPage)
    tweetCount = Tweet.objects.count()
    page = request.GET.get("page")
    try:
        tweets = paginator.page(page)
    except PageNotAnInteger:
        tweets = paginator.page(1)
    except EmptyPage:
        tweets = paginator.page(paginator.num_pages)
    context = {
        "tweets": tweets,
        "tweetCount": tweetCount,
        "tweetsPerPage": tweetsPerPage,
    }
    return render_to_response("ITInfluence/twitter-browse-tweets.html", context, context_instance=RequestContext(request))
def twitterFollowersCountRanking(request):
    '''Top 30 fully-fetched users ordered by follower count, descending.'''
    rankingLength = 30 # top n users
    ranked = User.objects.all().filter(user_ready=1).order_by("-followers_count")[:rankingLength]
    context = {
        "users": ranked,
    }
    return render_to_response("ITInfluence/twitter-followers-ranking.html", context, context_instance=RequestContext(request))
def twitterStartCollectingStream(request, hashtag):
    '''Render the collect-stream page (the hashtag argument is currently unused).'''
    return render_to_response("ITInfluence/twitter-collect-stream.html", {}, context_instance=RequestContext(request))
def twitterShowCollectForm(request):
    '''Render the collection control form; on POST, start a new stream.

    Shows follower-collection progress and, when the request carries
    "hashtag", "stopTweetCount" and "stopTime" POST fields, launches tweet
    streaming for those tags.
    '''
    totalTimeToCollectFolowers = followers.getTotalTimeToCollectFollowers()
    remainingTimeToCollectFolowers = followers.getRemainingTimeToCollectFollowers()
    twoDecimalFormat = "{0:.2f}"
    followersTimeDays = twoDecimalFormat.format(totalTimeToCollectFolowers[0])
    followersTimeHours = twoDecimalFormat.format(totalTimeToCollectFolowers[1])
    followersTimeMinutes = twoDecimalFormat.format(totalTimeToCollectFolowers[2])
    followersRemainingTimeDays = twoDecimalFormat.format(remainingTimeToCollectFolowers[0])
    followersRemainingTimeHours = twoDecimalFormat.format(remainingTimeToCollectFolowers[1])
    followersRemainingTimeMinutes = twoDecimalFormat.format(remainingTimeToCollectFolowers[2])
    try:
        # NOTE(review): progress is derived from index [2] (presumably the
        # minutes component) of both tuples -- confirm that is intended rather
        # than a total-duration value.
        followersProgressPercentage = 100 - 100 * (remainingTimeToCollectFolowers[2] / totalTimeToCollectFolowers[2])
    except:
        # division by zero when nothing remains to collect -> show 100%
        followersProgressPercentage = 100
    followersProgressPercentage = twoDecimalFormat.format(followersProgressPercentage)
    context = {
        "isCollectingFollowers": followers.rest.isCollectingFollowers,
        "followersTimeDays": followersTimeDays,
        "followersTimeHours": followersTimeHours,
        "followersTimeMinutes": followersTimeMinutes,
        "followersRemainingTimeDays": followersRemainingTimeDays,
        "followersRemainingTimeHours": followersRemainingTimeHours,
        "followersRemainingTimeMinutes": followersRemainingTimeMinutes,
        "followersProgressPercentage": followersProgressPercentage,
    }
    try:
        hashtagSeparator = ","
        # add a leading # to all tags seperated by commas
        hashtags = ["#" + tag.strip() for tag in request.POST["hashtag"].split(hashtagSeparator)]
        filterTag = hashtagSeparator.join(hashtags)
        stopTweetCount = int(request.POST["stopTweetCount"])
        stopTime = int(request.POST["stopTime"])
        # start collecting tweets
        twitter.startStreaming(filterHashtag=filterTag, stopTweetCount=stopTweetCount, stopTime=stopTime)
    except:
        # GET request (no POST fields) or malformed values: just show the form
        pass
    context["isStreaming"] = twitter.stream.connected
    context["streamingHashtag"] = twitter.streamingHashtag
    return render_to_response("ITInfluence/twitter-collect-form.html", context, context_instance=RequestContext(request))
def twitterToggleCollectingFollowers(request):
    '''Flip the background follower-collection job on/off, then return to the form.'''
    followers.toggleFollowersCollection()
    return redirect("/twitter/collect/")
def infrarougeStats(request):
    '''Infrarouge dashboard: table counts plus links to the generated charts.'''
    imageBase = "/infrarouge/images/"
    context = {
        'forumCount': InfrarougeForum.objects.count(),
        'forumThreadCount': InfrarougeForumThread.objects.count(),
        'threadMessageCount': InfrarougeThreadMessage.objects.count(),
        'userCount': InfrarougeUser.objects.count(),
        'NDIPath': imageBase + "ndi/",
        'NDITimePath': imageBase + "ndi-time/",
        'NDIRepliesCountPath': imageBase + "ndi-replies-count/",
        'repliesGraphVisualizationPath': imageBase + "replies-graph/",
        'userDiscussionsGraphVisualizationPath': imageBase + "user-discussion-graph/",
    }
    return render_to_response("ITInfluence/infrarouge.html", context, context_instance=RequestContext(request))
def getInfrarougeForums(request):
    '''List every Infrarouge forum.'''
    context = {"forums": InfrarougeForum.objects.all()}
    return render_to_response("ITInfluence/infrarouge-forums.html", context, context_instance=RequestContext(request))
def twitterStopStreaming(request):
    '''Toggle the stream connection (despite the name, it also starts an idle stream).'''
    twitter.toggleStreaming()
    return redirect("/twitter/collect/")
'''Image views'''
def showImage(request, path):
    '''Wrapper view that displays the image at given path on a html page'''
    return render_to_response("ITInfluence/show-image.html", {"path": path}, context_instance=RequestContext(request))
# These views do NOT return HTML. They return PNG images.
def getInfrarougeNDI(request):
    '''PNG: NDI curves for every Infrarouge forum, both interval schemes.'''
    with timer():
        equalTime, equalReplies = NDI.polarizationForAllForums()
        figure = TwoDimensionalValuesPlotter()
        response = figure.memoryPlotMultipleDatasetsMultidimensional(
            [equalTime, equalReplies], "Interval", "NDI")
    return response
def getInfrarougeNDITimeFigure(request):
    '''PNG: forum NDI computed over equal-time intervals only.'''
    with timer():
        equalTime, unused = NDI.polarizationForAllForums(equalRepliesCount=False)
        figure = TwoDimensionalValuesPlotter()
        response = figure.memoryPlotMultipleDatasets(
            equalTime, "Interval (equal time)", "NDI")
    return response
def getInfrarougeNDIReplyCountFigure(request):
    '''PNG: forum NDI computed over equal-reply-count intervals only.'''
    with timer():
        unused, equalReplies = NDI.polarizationForAllForums(equalTime=False)
        figure = TwoDimensionalValuesPlotter()
        response = figure.memoryPlotMultipleDatasets(
            equalReplies, "Interval (equal reply count)", "NDI")
    return response
def getInfrarougeRepliesGraph(request):
    '''PNG: user-to-user replies graph built from the Infrarouge database.'''
    with timer():
        builder = InfrarougeGraphBuilder(NDI.infrarougeDatabasePath)
        response = GraphPlotter().memoryPlot(builder.graph1,
                                             nodeLabel="User",
                                             edgeLabel="Reply")
    return response
def getInfrarougeUserDiscussionGraph(request):
    '''PNG: bipartite user/discussion participation graph.'''
    with timer():
        builder = InfrarougeGraphBuilder(NDI.infrarougeDatabasePath)
        plotter = GraphPlotter()
        graph = builder.graph2
        # Bipartite layout: users on one side, discussions on the other.
        positions = plotter.bipartiteNodePositions(graph)
        response = plotter.memoryPlot(graph, bipartite=True, pos=positions,
                                      nodeLabel="User",
                                      nodeLabel2="Discussion",
                                      edgeLabel="Participation")
    return response
def getTwitterNDI(request):
    '''PNG: NDI for the most frequent hashtags, both interval schemes.'''
    with timer(): # measure execution time
        equalTime, equalReplies = NDI.getNDIForMostFrequentHashtags()
        figure = TwoDimensionalValuesPlotter()
        response = figure.memoryPlotMultipleDatasetsMultidimensional(
            [equalTime, equalReplies], "Interval", "NDI")
    return response
def getTwitterNDISingleHashtag(request, hashtag):
    '''PNG: NDI for one hashtag, both interval schemes, at 800x600.'''
    with timer():
        equalTime, equalReplies = NDI.getNDIForHashtag(hashtag)
        figure = TwoDimensionalValuesPlotter(width=800, height=600)
        response = figure.memoryPlotMultipleDatasetsMultidimensional(
            [equalTime, equalReplies], "Interval", "NDI")
    return response
def getTwitterNDITimeFigure(request):
    '''PNG: hashtag NDI computed over equal-time intervals only.'''
    with timer():
        equalTime, unused = NDI.getNDIForMostFrequentHashtags(equalRepliesCount=False)
        figure = TwoDimensionalValuesPlotter()
        response = figure.memoryPlotMultipleDatasets(
            equalTime, "Interval (equal time)", "NDI")
    return response
def getTwitterNDIReplyCountFigure(request):
    '''PNG: hashtag NDI computed over equal-reply-count intervals only.'''
    with timer():
        unused, resultsEqualRepliesCount = NDI.getNDIForMostFrequentHashtags(equalTime=False)
        plotter = TwoDimensionalValuesPlotter()
        # FIX: this figure plots the equal-reply-count variant; the label used
        # to say "equal time" (copy-paste from the sibling view above).
        xlabel = "Interval (equal reply count)"
        ylabel = "NDI"
        response = plotter.memoryPlotMultipleDatasets(resultsEqualRepliesCount, xlabel, ylabel)
    return response
def getTwitterRepliesGraph(request):
    '''PNG: tweet-replies graph.'''
    with timer():
        graph = TwitterGraphBuilder().graph
        # To keep a copy on disk, uncomment:
        # saveGraph(graph, "replies_graph.gml")
        plotter = GraphPlotter()
        print("Plotting graph...")
        response = plotter.memoryPlot(graph)
        print("...done!")
    return response
def getTwitterFollowersGraph(request):
    '''PNG: follower graph; node size and colour encode follower counts.'''
    with timer():
        builder = TwitterFollowersGraphBuilder()
        graph = builder.graph
        # To keep a copy on disk, uncomment:
        # saveGraph(graph, "followers_graph.gml")
        plotter = GraphPlotter()
        print("Plotting graph...")
        response = plotter.memoryPlot(graph,
                                      nodeSizes=builder.nodeSizes,
                                      nodeColor=builder.nodeSizes,
                                      nodeLabel="User",
                                      edgeLabel="Friendship")
        print("...done!")
    return response
'''HELPER FUNCTIONS'''
def saveGraph(graph, filename):
    '''Persist *graph* to *filename* in GML format.'''
    nx.gml.write_gml(graph, filename)
def fontSize(occurences):
    '''Map an occurrence count to a tag-cloud font size (logarithmic scale).'''
    return math.log(occurences) + 1
def classifyTweets():
    '''Annotate all not-yet-classified tweets with a SentiStrength polarity.

    All pending tweet texts are joined (newline-separated, embedded newlines
    flattened to spaces) so the classifier is invoked once for the batch, then
    each tweet is updated with its polarity and marked ready.
    '''
    # only classify tweets that are not classified yet
    tweets = Tweet.objects.all().filter(polarity_ready=False)
    if len(tweets) > 0:
        s = sentistrength.SentiStrength()
        # join instead of repeated += (the old loop was quadratic in total text size)
        text = "".join(tweet.text.replace("\n", " ") + "\n" for tweet in tweets)
        # NOTE(review): 'classifiy' is the (misspelled) method name exposed by
        # the project's SentiStrength wrapper -- confirm before renaming.
        polarities = s.classifiy(text)
        # update tweets with their polarity
        for tweet, polarity in zip(tweets, polarities):
            tweet.polarity = polarity
            tweet.polarity_ready = True
            tweet.save()
[
1,
0,
0.0049,
0.0024,
0,
0.66,
0,
526,
0,
1,
0,
0,
526,
0,
0
],
[
1,
0,
0.0073,
0.0024,
0,
0.66,
0.0222,
691,
0,
1,
0,
0,
691,
0,
0
],
[
1,
0,
0.0121,
0.0024,
0,
... | [
"import math",
"import networkx as nx",
"from django.shortcuts import render_to_response, redirect",
"from django.template import RequestContext",
"from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger",
"from django.db import connection",
"from ITInfluence.models import Tweet, User",... |
'''
Created on 12 juin 2013
@author: Nils Amiet
'''
# Database routers are used to know which model should be used with which database.
# This is useful in our case because we have multiple databases.
class TwitterRouter():
    '''Database router: Twitter models use "default", everything else "infrarouge".

    A model opts into the Twitter database by defining an ``isTwitterModel``
    attribute on its Meta options; only the attribute's presence matters.
    '''

    def _isTwitterModel(self, model):
        # Presence of the marker attribute decides the routing.
        return hasattr(model._meta, "isTwitterModel")

    def db_for_read(self, model, **hints):
        '''Read Twitter models from "default", others from "infrarouge".'''
        return "default" if self._isTwitterModel(model) else "infrarouge"

    def db_for_write(self, model, **hints):
        '''Write Twitter models to "default", others to "infrarouge".'''
        return "default" if self._isTwitterModel(model) else "infrarouge"

    def allow_syncdb(self, db, model):
        '''Only sync each model into the database family it belongs to.'''
        if self._isTwitterModel(model):
            return db == "default"
        return db != "default"
[
8,
0,
0.1071,
0.1786,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
3,
0,
0.6786,
0.6786,
0,
0.66,
1,
87,
0,
3,
0,
0,
0,
0,
3
],
[
2,
1,
0.4643,
0.1786,
1,
0.35,
0,... | [
"'''\nCreated on 12 juin 2013\n\n@author: Nils Amiet\n'''",
"class TwitterRouter():\n def db_for_read(self, model, **hints):\n if hasattr(model._meta, \"isTwitterModel\"):\n return \"default\"\n else:\n return \"infrarouge\"\n\n def db_for_write(self, model, **hints):",
... |
'''
Created on 23 mars 2013
@author: Nils Amiet
'''
# coding: utf-8
'''
Graph 1:
Replies graph
Weighted directed graph
Weight of edge from A to B = number of replies from user A to user B
Graph 2:
User-Discussion graph
Bipartite graph
Edge from User A to discussion D => user A participates in discussion D
'''
import networkx as nx
import networkx.readwrite.gml as gml
import sqlite3
import random
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
'''Config'''
# SQLite database produced by InfrarougeGrabber, relative to this module.
databasePath = "../../../../../InfrarougeGrabber/infrarouge.db"
# Output files for the two graphs, in GML format.
graph1Path = "graph1.gml"
graph2Path = "graph2.gml"
# (from_user, to_user) -> reply count; feeds graph1 (weighted, directed).
userRepliesCounts = {}
graph1 = nx.DiGraph()
# (user, thread) pairs; feed graph2 (bipartite user/discussion graph).
userParticipations = []
graph2 = nx.Graph()
def users():
    '''Return every row of the "user" table.'''
    with sqlite3.connect(databasePath) as connection:
        cursor = connection.cursor()
        cursor.execute("SELECT * FROM user")
        allRows = list(cursor)
        cursor.close()
    return allRows
def getForums():
    '''Return every forum as a dict with "id", "title" and "description" keys.'''
    forums = []
    with sqlite3.connect(databasePath) as connection:
        cursor = connection.cursor()
        query = """
        SELECT *
        FROM forum
        """
        cursor.execute(query)
        for row in cursor:
            forums.append({
                "id": row[0],
                "title": row[1],
                "description": row[2],
            })
    return forums
def polarityReplies(forum=None):
    '''Returns a list of replies containing the source, destination, polarity, timestamp and forum id

    Each item is a dict with keys "from", "to", "polarity", "timestamp" and
    "forum". When *forum* is given, only replies from that forum are returned.
    '''
    replies = []
    with sqlite3.connect(databasePath) as connection:
        cursor = connection.cursor()
        query = """
        SELECT r.fkfrom, r.fkto, m.polarity, m.timestamp, f.id
        FROM reply as r
        INNER JOIN threadmessage as m
        ON r.fkthreadmessage=m.id
        INNER JOIN forumthread as t
        ON m.fkforumthread=t.id
        INNER JOIN forum as f
        ON t.fkparentforum=f.id
        """
        params = []
        if forum is not None:
            # FIX: parameterized placeholder instead of string concatenation --
            # safe even if *forum* ever comes from user input.
            query += "WHERE f.id=?"
            params.append(forum)
        cursor.execute(query, params)
        for reply in cursor:
            builtReply = {
                "from": reply[0],
                "to": reply[1],
                "polarity": reply[2],
                "timestamp": reply[3],
                "forum": reply[4]
            }
            replies += [builtReply]
        cursor.close()
    return replies
def countUserReplies():
    '''Feed every reply row through countReply() to tally per-pair counts.'''
    with sqlite3.connect(databasePath) as connection:
        cursor = connection.cursor()
        cursor.execute("SELECT * FROM reply")
        for row in cursor:
            countReply(row)
        cursor.close()
def countReply(reply):
    '''Increment the reply counter for this row's (sender, recipient) pair.'''
    global userRepliesCounts
    # reply[0] = sender id, reply[1] = recipient id (column 2, the discussion,
    # is not part of the key).
    pair = (reply[0], reply[1])
    userRepliesCounts[pair] = userRepliesCounts.get(pair, 0) + 1
def createRepliesGraph():
    '''Build the weighted directed replies graph from the tallied counts.'''
    global graph1
    for (sender, recipient), count in userRepliesCounts.items():
        graph1.add_edge(sender, recipient, weight=count)
def buildUserParticipations():
    '''Collect a (user, thread) pair for every thread message in the database.'''
    global userParticipations
    with sqlite3.connect(databasePath) as connection:
        cursor = connection.cursor()
        cursor.execute("SELECT * FROM threadmessage")
        for message in cursor:
            # Column 2 holds the parent thread id, column 3 the author id.
            userParticipations.append((message[3], message[2]))
        cursor.close()
def createParticipationGraph():
    '''Build the bipartite user-discussion graph from the participation pairs.'''
    global graph2
    graph2.add_nodes_from((pair[0] for pair in userParticipations), bipartite=0)
    graph2.add_nodes_from((pair[1] for pair in userParticipations), bipartite=1)
    graph2.add_edges_from(userParticipations)
def saveGraphs():
    '''Write both graphs to their GML files.'''
    for graph, path in ((graph1, graph1Path), (graph2, graph2Path)):
        gml.write_gml(graph, path)
def drawGraph(graph, filename, dpi):
    '''Render *graph* with a spring layout and save it to *filename* at *dpi*.'''
    pos=nx.spring_layout(graph)
#     pos = nx.shell_layout(graph)
#     pos = nx.graphviz_layout(graph)
    nx.draw(graph, pos=pos)
    plt.savefig(filename, dpi=dpi) # save as png
if __name__ == "__main__":
    # Build both graphs from the database, persist them as GML, print a few
    # sanity checks, then render graph 1 to a PNG.
    countUserReplies()
    createRepliesGraph()
    buildUserParticipations()
    createParticipationGraph()
    saveGraphs()
    print(len(graph1.nodes()))
    print(len(graph2.nodes()))
    print(polarityReplies(forum=1))
    print(getForums())
    drawGraph(graph1, "g1.png", 180)
[
8,
0,
0.015,
0.025,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
8,
0,
0.07,
0.055,
0,
0.66,
0.037,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.105,
0.005,
0,
0.66,
0.0741... | [
"'''\nCreated on 23 mars 2013\n\n@author: Nils Amiet\n'''",
"'''\nGraph 1:\nReplies graph\nWeighted directed graph\nWeight of edge from A to B = number of replies from user A to user B\n\nGraph 2:\nUser-Discussion graph",
"import networkx as nx",
"import networkx.readwrite.gml as gml",
"import sqlite3",
"... |
'''
Created on 23 mai 2013
@author: Nils Amiet
'''
# coding: utf-8
class PolarityCounter:
    '''Per-user polarity statistics and a Network Disagreement Index (NDI).

    Replies are dicts with "from", "to" and "polarity" keys. Polarities are
    binarized (<= 0 -> negative, > 0 -> positive). A user's AVERAGE is the
    absolute difference between their normalized received and sent polarity,
    or None when they never both sent and received. The NDI sums the squared
    opinion gap (AVERAGE / 2, unknown -> 0) over the distinct reply edges.
    '''

    RECEIVED = "recv"
    SENT = "sent"
    AVERAGE = "avg"
    NDI = -1 # network disagreement index (class-level default; set per instance)

    def __init__(self, replies):
        '''
        Replies: a list of replies with the attributes "from", "to" and "polarity"
        '''
        self.replies = replies
        # FIX: these used to be class-level attributes, so every instance
        # shared (and kept accumulating into) the same dict/list.
        self.polarityCounts = {}
        self.edges = []
        self.countPolarities()
        self.computeAveragePolarities()
        self.computeNDI()

    def _userCounts(self, user):
        '''Return (creating if needed) the per-user counts record.'''
        return self.polarityCounts.setdefault(
            user, {self.RECEIVED: {}, self.SENT: {}, self.AVERAGE: None})

    def countPolarities(self):
        '''Tally sent/received positive (1) and negative (0) counts per user.'''
        for reply in self.replies:
            fromUser = reply["from"]
            toUser = reply["to"]
            polarity = 0 if reply["polarity"] <= 0 else 1
            sentCounts = self._userCounts(fromUser)[self.SENT]
            recvCounts = self._userCounts(toUser)[self.RECEIVED]
            sentCounts[polarity] = sentCounts.get(polarity, 0) + 1
            recvCounts[polarity] = recvCounts.get(polarity, 0) + 1

    def computeAveragePolarities(self):
        '''Set each user's AVERAGE to |normalized received - normalized sent|.

        Users who never sent or never received keep AVERAGE = None. (FIX: the
        old loop reused the previous iteration's recv/sent locals when a
        division failed, leaking one user's value into the next.)
        '''
        for userCounts in self.polarityCounts.values():
            receivedPositive = userCounts[self.RECEIVED].get(1, 0)
            receivedNegative = userCounts[self.RECEIVED].get(0, 0)
            sentPositive = userCounts[self.SENT].get(1, 0)
            sentNegative = userCounts[self.SENT].get(0, 0)
            receivedTotal = receivedPositive + receivedNegative
            sentTotal = sentPositive + sentNegative
            if receivedTotal == 0 or sentTotal == 0:
                continue # AVERAGE stays None
            recv = (receivedPositive - receivedNegative) / float(receivedTotal)
            sent = (sentPositive - sentNegative) / float(sentTotal)
            userCounts[self.AVERAGE] = abs(recv - sent)

    def _opinion(self, user):
        '''Half the user's AVERAGE, or 0 when unknown.'''
        average = self.polarityCounts.get(user, {}).get(self.AVERAGE)
        if average is None:
            return 0
        return average / 2.0

    def computeNDI(self):
        '''Compute the network disagreement index over the distinct edges.'''
        self.computeEdges()
        sumNDI = 0
        for edge in self.edges:
            weight = 1 # every edge currently carries the same weight
            firstOpinion = self._opinion(edge["from"])
            secondOpinion = self._opinion(edge["to"])
            sumNDI += weight * ((firstOpinion - secondOpinion)**2)
        self.NDI = sumNDI

    def computeEdges(self):
        '''Deduplicate replies into undirected edges (A->B equals B->A).

        Uses a set of frozen user pairs for O(n) dedup instead of the old
        O(n^2) pairwise scan; the first reply seen for a pair is kept.
        '''
        seenPairs = set()
        for reply in self.replies:
            pair = frozenset((reply["from"], reply["to"]))
            if pair not in seenPairs:
                seenPairs.add(pair)
                self.edges += [reply]

    def contains(self, reply, edges):
        '''True when *edges* already holds a reply between the same user pair.'''
        for edge in edges:
            if self.isSameMessage(reply, edge):
                return True
        return False

    def isSameMessage(self, reply, reply2):
        '''True when both replies connect the same two users, either direction.'''
        return reply["from"] == reply2["from"] and reply["to"] == reply2["to"] or reply["from"] == reply2["to"] and reply["to"] == reply2["from"]

    def polarityDifferenceRanking(self):
        '''Return (user, AVERAGE) pairs sorted by AVERAGE, highest first.'''
        users = {user: userCounts[self.AVERAGE] for user, userCounts in self.polarityCounts.items() if not userCounts[self.AVERAGE] == None}
        print(users)
        ranking = sorted(users.items(), key=lambda x: x[1], reverse=True)
        print(ranking)
        return ranking
if __name__ == "__main__":
    # Small smoke test: three replies among users 2, 4 and 5.
    sampleReplies = [
        {"from": 4, "to": 5, "polarity": 1},
        {"from": 5, "to": 4, "polarity": 1},
        {"from": 4, "to": 2, "polarity": 0},
    ]
    counter = PolarityCounter(sampleReplies)
    print(counter.polarityCounts)
    counter.polarityDifferenceRanking()
[
8,
0,
0.0205,
0.0342,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
3,
0,
0.4966,
0.8767,
0,
0.66,
0.5,
151,
0,
8,
0,
0,
0,
0,
17
],
[
14,
1,
0.0753,
0.0068,
1,
0.22,
... | [
"'''\nCreated on 23 mai 2013\n\n@author: Nils Amiet\n'''",
"class PolarityCounter:\n \n polarityCounts = {}\n RECEIVED = \"recv\"\n SENT = \"sent\"\n AVERAGE = \"avg\"\n NDI = -1 # network disagreement index",
" polarityCounts = {}",
" RECEIVED = \"recv\"",
" SENT = \"sent\"",
"... |
'''
Created on 4 avr. 2013
@author: Nils Amiet
'''
# coding: utf-8
# Input graphs (produced by the graph builder) and the HTML report target.
graph1Path = "graph1.gml"
graph2Path = "graph2.gml"
rankingsFile = "rankings.html"
import networkx as nx
import networkx.readwrite.gml as gml
import operator
import math
import copy
import os
import sys
import sqlite3
''' Required for running the script from anywhere outside eclipse'''
os.chdir(os.path.dirname(os.path.realpath(__file__)))
sys.path.append('../../../../../InfrarougeGraphBuilder')
from ch.hearc.infrarouge.graphbuilder.HTMLTableGenerator import HTMLTableGenerator
from ch.hearc.infrarouge.graphbuilder.main import users, polarityReplies, getForums, databasePath
from ch.hearc.infrarouge.graphbuilder.polarity import PolarityCounter
# Cached user rows from the database, used to resolve usernames in the
# HTML tables.
sqlUsers = users()
# Per-graph average-rank tables; appended to by createRanking() and
# consumed by createGeneralRanking().
averageRankings = []
def readGraphs():
    """Load both GML graphs from disk, relabeling nodes by their labels."""
    repliesGraph = gml.read_gml(graph1Path, relabel=True)
    userDiscussionGraph = gml.read_gml(graph2Path, relabel=True)
    return (repliesGraph, userDiscussionGraph)
def getSortedTop(centrality):
    """Return (key, value) pairs of *centrality* sorted by decreasing value."""
    return sorted(centrality.items(), key=lambda item: item[1], reverse=True)
def writeHTML(filename, headings, rows, readWriteMode='w'):
    """Write a user-decorated HTML table (links + usernames) to *filename*."""
    table = HTMLTableGenerator(th=headings, rows=rows, users=sqlUsers)
    _writeFragment(filename, str(table), readWriteMode)

def writeRawHTML(filename, headings, rows, readWriteMode='w'):
    """Write a plain HTML table (no user decoration) to *filename*."""
    table = HTMLTableGenerator(th=headings, rows=rows, users=sqlUsers)
    _writeFragment(filename, table.raw(), readWriteMode)

def writeHeader(filename):
    """Start a fresh report file with the HTML document header."""
    page = HTMLTableGenerator(title="Infrarouge Centrality rankings")
    _writeFragment(filename, str(page.header()), 'w')

def writeFooter(filename):
    """Append the closing HTML tags to the report."""
    _writeFragment(filename, str(HTMLTableGenerator().footer()), 'a')

def writeH1(filename, text):
    """Append a section heading to the report."""
    _writeFragment(filename, HTMLTableGenerator().h1(text), 'a')

def _writeFragment(filename, fragment, mode):
    # Shared write/append helper for all the report writers above.
    with open(filename, mode) as handle:
        handle.write(fragment)
def createRanking(graph, ev=True, directed=False):
    """Append the full centrality report for *graph* to the rankings file.

    Writes, in order: general graph statistics, top-15 tables for degree
    (plus in/out-degree when directed), closeness, betweenness and
    optionally eigenvector centrality, clustering coefficients, overlap
    statistics between the heads of the rankings, an average-rank table
    (also accumulated in the module-level averageRankings list), and
    clustering tables for top-N% subgraphs saved as GML files.

    @param graph: the networkx graph to analyse
    @param ev: whether to compute eigenvector centrality
    @param directed: whether the graph is directed (enables in/out degree
        and marks it weighted/non-bipartite in the summary row)
    """
    # general graph information
    headings = ["#nodes", "#edges", "directed", "weighted", "bipartite"]
    rows = []
    if directed:
        weighted = True
        bipartite = False
    else:
        weighted = False
        bipartite = True
    rows += [(str(len(graph.nodes())), str(len(graph.edges())), str(directed), str(weighted), str(bipartite))]
    writeRawHTML(rankingsFile, headings, rows, readWriteMode='a')
    # centrality rankings
    degreeCentrality = nx.degree_centrality(graph)
    if directed:
        inDegreeCentrality = nx.in_degree_centrality(graph)
        outDegreeCentrality = nx.out_degree_centrality(graph)
    closenessCentrality = nx.closeness_centrality(graph)
    betweennessCentrality = nx.betweenness_centrality(graph)
    nbNodes = len(graph.nodes())
    topLength = 15
    degreeRanking = getSortedTop(degreeCentrality)
    if directed:
        inDegreeRanking = getSortedTop(inDegreeCentrality)
        outDegreeRanking = getSortedTop(outDegreeCentrality)
    closenessRanking = getSortedTop(closenessCentrality)
    betweennessRanking = getSortedTop(betweennessCentrality)
    headings = ["user id", "username", "degree centrality"]
    writeHTML(rankingsFile, headings, degreeRanking[:topLength], readWriteMode='a')
    if directed:
        headings = ["user id", "username", "in-degree centrality"]
        writeHTML(rankingsFile, headings, inDegreeRanking[:topLength], readWriteMode='a')
        headings = ["user id", "username", "out-degree centrality"]
        writeHTML(rankingsFile, headings, outDegreeRanking[:topLength], readWriteMode='a')
    headings = ["user id", "username", "closeness centrality"]
    writeHTML(rankingsFile, headings, closenessRanking[:topLength], readWriteMode='a')
    headings = ["user id", "username", "betweenness centrality"]
    writeHTML(rankingsFile, headings, betweennessRanking[:topLength], readWriteMode='a')
    if ev:
        evCentrality = nx.eigenvector_centrality(graph, max_iter=500)
        evRanking = getSortedTop(evCentrality)
        headings = ["user id", "username", "eigenvector centrality"]
        writeHTML(rankingsFile, headings, evRanking[:topLength], readWriteMode='a')
    clusteringCoefficientRanking = computeClusteringCoefficient(graph, directed)
    # clustering coefficient
    headings = ["user id", "username", "clustering coefficient"]
    writeHTML(rankingsFile, headings, clusteringCoefficientRanking[:topLength], readWriteMode='a')
    # fraction of common users
    # (one accumulator per combination of rankings; D=degree, C=closeness,
    # B=betweenness, E=eigenvector)
    commonNodesStatsDCBE = []
    commonNodesStatsDE = []
    commonNodesStatsCE = []
    commonNodesStatsBE = []
    commonNodesStatsDC = []
    commonNodesStatsDB = []
    commonNodesStatsBC = []
    commonNodesStatsDCB = []
    fraction = 1
    while fraction <= 20: # from 1% to 20%
        if ev:
            commonNodesFractionDCBE = computeFractionOfCommonUsers(fraction/100.0, degreeRanking, closenessRanking, betweennessRanking, evRanking)
            commonNodesFractionDE = computeFractionOfCommonUsers(fraction/100.0, degreeRanking, evRanking)
            commonNodesFractionCE = computeFractionOfCommonUsers(fraction/100.0, closenessRanking, evRanking)
            commonNodesFractionBE = computeFractionOfCommonUsers(fraction/100.0, betweennessRanking, evRanking)
            commonNodesStatsDCBE += [(str(fraction) + "%", str(100*commonNodesFractionDCBE) + "%")]
            commonNodesStatsDE += [(str(fraction) + "%", str(100*commonNodesFractionDE) + "%")]
            commonNodesStatsCE += [(str(fraction) + "%", str(100*commonNodesFractionCE) + "%")]
            commonNodesStatsBE += [(str(fraction) + "%", str(100*commonNodesFractionBE) + "%")]
        commonNodesFractionDCB = computeFractionOfCommonUsers(fraction/100.0, degreeRanking, closenessRanking, betweennessRanking)
        commonNodesFractionDC = computeFractionOfCommonUsers(fraction/100.0, degreeRanking, closenessRanking)
        commonNodesFractionDB = computeFractionOfCommonUsers(fraction/100.0, degreeRanking, betweennessRanking)
        commonNodesFractionBC = computeFractionOfCommonUsers(fraction/100.0, betweennessRanking, closenessRanking)
        commonNodesStatsDC += [(str(fraction) + "%", str(100*commonNodesFractionDC) + "%")]
        commonNodesStatsDB += [(str(fraction) + "%", str(100*commonNodesFractionDB) + "%")]
        commonNodesStatsBC += [(str(fraction) + "%", str(100*commonNodesFractionBC) + "%")]
        commonNodesStatsDCB += [(str(fraction) + "%", str(100*commonNodesFractionDCB) + "%")]
        fraction += 1
    if ev:
        headings = ["top proportion", "fraction of common users [DC, CC, BC, EC]"]
        writeRawHTML(rankingsFile, headings, commonNodesStatsDCBE, readWriteMode='a')
    headings = ["top proportion", "fraction of common users [DC, CC, BC]"]
    writeRawHTML(rankingsFile, headings, commonNodesStatsDCB, readWriteMode='a')
    topNumber = "top proportion"
    if ev:
        headingsDE = [topNumber, "fraction of common users [DC, EC]"]
        headingsCE = [topNumber, "fraction of common users [CC, EC]"]
        headingsBE = [topNumber, "fraction of common users [BC, EC]"]
        writeRawHTML(rankingsFile, headingsDE, commonNodesStatsDE, readWriteMode='a')
        writeRawHTML(rankingsFile, headingsCE, commonNodesStatsCE, readWriteMode='a')
        writeRawHTML(rankingsFile, headingsBE, commonNodesStatsBE, readWriteMode='a')
    headingsDC = [topNumber, "fraction of common users [DC, CC]"]
    headingsDB = [topNumber, "fraction of common users [DC, BC]"]
    headingsBC = [topNumber, "fraction of common users [BC, CC]"]
    writeRawHTML(rankingsFile, headingsDC, commonNodesStatsDC, readWriteMode='a')
    writeRawHTML(rankingsFile, headingsDB, commonNodesStatsDB, readWriteMode='a')
    writeRawHTML(rankingsFile, headingsBC, commonNodesStatsBC, readWriteMode='a')
    # top 15 common nodes
    topn = 15
    if ev:
        fraction = topn / float(nbNodes)
        commonNodesTop15Fraction = computeFractionOfCommonUsers(fraction, degreeRanking, closenessRanking, betweennessRanking, evRanking)
        commonNodesTop15Stats = [("Top " + str(topn), str(int(commonNodesTop15Fraction*topn)) + " (" + str(100*commonNodesTop15Fraction) + "%)")]
        topNumber = "top number"
        headings = [topNumber, "Common users [DC, CC, BC, EC] in top " + str(topn)]
        writeRawHTML(rankingsFile, headings, commonNodesTop15Stats, readWriteMode='a')
    # average rank
    topNodes = []
    if ev:
        # compute average of all 4
        rankings = [degreeRanking, closenessRanking, betweennessRanking, evRanking]
    else:
        # compute average of all 3
        rankings = [degreeRanking, closenessRanking, betweennessRanking]
    for ranking in rankings:
        topNodes += [x[0] for x in ranking]
    topNodes = set(topNodes)
    averageRankStats = []
    for node in topNodes:
        averageRank = 0
        for ranking in rankings:
            try:
                rank = [x[0] for x in ranking].index(node)
            except:
                print("Warning: Node not in ranking!")
            # NOTE(review): when the node is missing from a ranking, 'rank'
            # keeps its value from the previous iteration (and is undefined
            # on the very first one) — confirm whether missing nodes should
            # instead be skipped or penalized.
            averageRank += rank
        averageRank /= float(len(rankings))
        averageRank += 1
        averageRankStats += [(node, averageRank)]
    averageRankStats = sorted(averageRankStats, key=operator.itemgetter(1), reverse=False)
    global averageRankings
    averageRankings += [averageRankStats]
    headings = ["node id", "username", "average rank"]
    writeHTML(rankingsFile, headings, averageRankStats[:topn], readWriteMode='a')
    # top n% subgraph
    limit = 7
    for topPercent in range(10, 100, 15):
        topNumber = int(nbNodes * topPercent / float(100))
        topNodes = [x[0] for x in averageRankStats][:topNumber]
        topNodesAndNeighbors = copy.deepcopy(topNodes)
        for node in topNodes:
            topNodesAndNeighbors += graph.neighbors(node)
        subGraph = nx.subgraph(graph, topNodes)
        subGraphWithNeighbors = nx.subgraph(graph, topNodesAndNeighbors)
        filename = "graph" + ("1" if directed else "2") + "top" + str(topPercent) + "percent.gml"
        saveGraph(subGraph, filename)
        clusteringCoefficientRanking = computeClusteringCoefficient(subGraph, directed)
        clusteringCoefficientNeighborsRanking = computeClusteringCoefficient(subGraphWithNeighbors, directed)
        # Keep only the top nodes themselves (their coefficients were
        # computed in the neighborhood-extended subgraph).
        clusteringCoefficientNeighborsRanking = [x for x in clusteringCoefficientNeighborsRanking if x[0] in topNodes]
        headings = ["user id", "username", "clustering coefficient for top" + str(topPercent) + "% subgraph"]
        writeHTML(rankingsFile, headings, clusteringCoefficientRanking[:limit], readWriteMode='a')
        headings = ["user id", "username", "clustering coefficient (with neighbors) for top" + str(topPercent) + "% subgraph"]
        writeHTML(rankingsFile, headings, clusteringCoefficientNeighborsRanking[:limit], readWriteMode='a')
def computeClusteringCoefficient(graph, directed):
    """Return a descending (node, clustering coefficient) ranking.

    A directed graph is first flattened into an undirected copy before
    calling nx.clustering on it."""
    if directed:
        undirected = nx.Graph()
        undirected.add_nodes_from(graph.nodes())
        undirected.add_edges_from(graph.edges())
        coefficients = nx.clustering(undirected)
    else:
        coefficients = nx.clustering(graph)
    return getSortedTop(coefficients)
def saveGraph(graph, filename):
    """Persist *graph* to disk in GML format."""
    gml.write_gml(graph, filename)
def computeFractionOfCommonUsers(fraction, *rankings):
    """Return the fraction of users shared by the tops of all rankings.

    Each ranking is an already-sorted list of (node, score) pairs.  The top
    ceil(fraction * len(ranking)) entries of every ranking are intersected
    and the intersection size is divided by that portion size.

    @param fraction: proportion (0..1] of each ranking to consider
    @param rankings: two or more rankings to intersect
    @raise Exception: if fewer than two rankings are given
    """
    # Guard clause instead of wrapping the whole body in an if.
    if len(rankings) < 2:
        raise Exception("expected at least 2 rankings")
    nbNodes = len(rankings[0])
    # BUGFIX: math.ceil returns a float (always pre-3.0, and float input
    # otherwise implied float use); coerce to int before using it as a
    # slice bound, which Python 3 rejects for floats.
    portionSize = int(math.ceil(fraction * nbNodes))
    commonNodes = set([x[0] for x in rankings[0]][:portionSize])
    for ranking in rankings:
        rankingPortion = [x[0] for x in ranking][:portionSize]
        commonNodes = [x for x in commonNodes if x in rankingPortion]
    commonNodesFraction = len(commonNodes) / float(portionSize)
    return commonNodesFraction
def createGeneralRanking():
    """Average each node's rank across every table collected in the
    module-level averageRankings list and append the combined ranking
    (ascending average rank) to the report."""
    rankSums = {}
    occurrences = {}
    for ranking in averageRankings:
        for node, rank in ranking:
            rankSums[node] = rankSums.get(node, 0) + rank
            occurrences[node] = occurrences.get(node, 0) + 1
    finalRanking = [(node, total / float(occurrences[node]))
                    for node, total in rankSums.items()]
    finalRanking = sorted(finalRanking, key=operator.itemgetter(1), reverse=False)
    headings = ["node id", "username", "average rank over both graphs"]
    writeHTML(rankingsFile, headings, finalRanking, readWriteMode='a')
def createPolarityRanking():
    """Append tables of the users with the greatest and lowest polarity
    difference to the report, then print a few diagnostics."""
    limit = 7
    # polarity ranking
    polarityCounter = PolarityCounter(polarityReplies())
    polarityRanking = polarityCounter.polarityDifferenceRanking()
    writeHTML(rankingsFile,
              ["user id", "username", "Greatest polarity difference"],
              polarityRanking[:limit], readWriteMode='a')
    polarityRanking.reverse()
    writeHTML(rankingsFile,
              ["user id", "username", "Lowest polarity difference"],
              polarityRanking[:limit], readWriteMode='a')
    print("users: %s, ranking length: %s" % (len(sqlUsers), len(polarityRanking)))
    print(len(polarityCounter.replies))
    print(len(polarityCounter.edges))
    print("NDI=" + str(polarityCounter.NDI))
def printTime(interval):
    """Print a millisecond interval as *total* days, hours, minutes and
    seconds (each value is the whole interval in that unit, not a
    mixed-radix breakdown)."""
    totalSeconds = interval / 1000
    totalMinutes = totalSeconds / 60
    totalHours = totalMinutes / 60
    totalDays = totalHours / 24
    print(totalDays, totalHours, totalMinutes, totalSeconds)
def polarizationForAllForums():
    """Compute the NDI evolution over time for every forum."""
    for forum in getForums():
        forumID = forum["id"]
        computePolarizationOverTime(polarityReplies(forum=forumID), forumID)
def computePolarizationOverTime(replies, forumID):
    """Track how the forum's polarization evolves as replies accumulate.

    The reply timeline is split into 20 equal slices; at each slice
    boundary the NDI of all replies up to that point is computed, and the
    resulting series is dumped as a MATLAB script named topic<forumID>.m.
    """
    orderedReplies = sorted(replies, key=lambda reply: reply["timestamp"])
    start = orderedReplies[0]["timestamp"]
    end = orderedReplies[-1]["timestamp"]
    timeSliceCount = 20
    step = (end - start) / timeSliceCount
    samples = []
    threshold = start + step
    while threshold <= end:
        counter = PolarityCounter(repliesBeforeTime(orderedReplies, threshold))
        samples.append((threshold, counter.NDI, len(counter.edges)))
        threshold += step
    writeListsToFile([sample[0] for sample in samples],
                     [sample[1] for sample in samples],
                     [sample[2] for sample in samples],
                     "topic" + str(forumID) + ".m")
def writeListsToFile(times, ndis, edgesCounts, filename):
    # Dump the three series as a small MATLAB/Octave script which, when
    # executed, plots NDI against time.  The edges series is written but
    # not plotted by the embedded code.
    with open(filename, 'w+') as file:
        file.write("times = " + str(times) + ";\n")
        file.write("ndis = " + str(ndis) + ";\n")
        file.write("edges = " + str(edgesCounts) + ";\n")
        plotCode = """
        figure
        plot(times, ndis)
        xlabel('Time (timestamp)')
        ylabel('Network Disagreement Index (NDI)')
        """
        file.write(plotCode)
def repliesBeforeTime(replies, time):
    """Return the replies whose timestamp is at or before *time*."""
    kept = []
    for reply in replies:
        if reply["timestamp"] <= time:
            kept.append(reply)
    return kept
def usersWhoSentMultipleMessages(forum=1):
    """Return (author id, forum id, message count) rows for every user who
    posted more than one message in the given forum.

    @param forum: the forum id to query
    """
    # SECURITY: the forum id used to be interpolated into the SQL string;
    # bind it as a query parameter instead.
    query = """
    SELECT m.fkauthor, f.id, COUNT(*) as count
    FROM threadmessage as m
    INNER JOIN forumthread as t
    ON m.fkforumthread=t.id
    INNER JOIN forum as f
    ON t.fkparentforum=f.id
    WHERE f.id=?
    GROUP BY m.fkauthor
    HAVING count>1
    """
    users = []
    with sqlite3.connect(databasePath) as connection:
        cursor = connection.cursor()
        cursor.execute(query, (forum,))
        for user in cursor:
            users += [user]
        cursor.close()
    return users
def usersWhoParticipateInForum(forum=1):
    """Return (author id, forum id, message count) rows for every user who
    sent at least one message in the given forum.

    @param forum: the forum id to query
    """
    # SECURITY: the forum id used to be interpolated into the SQL string;
    # bind it as a query parameter instead.
    query = """
    SELECT m.fkauthor, f.id, COUNT(*) as count
    FROM threadmessage as m
    INNER JOIN forumthread as t
    ON m.fkforumthread=t.id
    INNER JOIN forum as f
    ON t.fkparentforum=f.id
    WHERE f.id=?
    GROUP BY m.fkauthor
    """
    users = []
    with sqlite3.connect(databasePath) as connection:
        cursor = connection.cursor()
        cursor.execute(query, (forum,))
        for user in cursor:
            users += [user]
        cursor.close()
    return users
def getUserCount():
    """Return the total number of rows in the user table."""
    with sqlite3.connect(databasePath) as connection:
        cursor = connection.cursor()
        cursor.execute("SELECT COUNT(*) as count FROM user")
        # COUNT(*) always yields exactly one row.
        count = cursor.fetchone()[0]
        cursor.close()
    return count
def forumContributionRanking():
    """Append a table ranking forums by the share of participants who
    posted more than one message."""
    ranking = []
    for forum in getForums():
        forumID = forum["id"]
        multiPosters = len(usersWhoSentMultipleMessages(forum=forumID))
        participants = len(usersWhoParticipateInForum(forum=forumID))
        percentage = multiPosters / float(participants) * 100
        ranking.append((forumID, multiPosters, participants, str(percentage) + "%"))
    ranking.sort(key=lambda row: row[1] / float(row[2]), reverse=True)
    headings = ["forum id", "users with sent messages count > 1", "users who sent at least 1 message count", "ratio"]
    writeRawHTML(rankingsFile, headings, ranking, readWriteMode='a')
if __name__ == "__main__":
    # Build the full HTML report: load both graphs, write a centrality
    # section for each, then the combined conclusion sections.
    (g1, g2) = readGraphs()
    print(len(g1.nodes()))
    print(len(g2.nodes()))
    writeHeader(rankingsFile)
    print("-----")
    print("Centrality Rankings for graph 1")
    print("-----")
    writeH1(rankingsFile, "Replies graph centrality measures (directed weighted)")
    createRanking(g1, directed=True)
    print()
    print("-----")
    print("Centrality Rankings for graph 2")
    print("-----")
    writeH1(rankingsFile, "User-Discussion graph centrality measures (bipartite)")
    createRanking(g2, ev=False) # do not compute Eigenvector centrality
    print()
    print("-----")
    print("Conclusion")
    print("-----")
    writeH1(rankingsFile, "Conclusion")
    # Order matters: createGeneralRanking consumes the averageRankings
    # accumulated by the two createRanking calls above.
    createGeneralRanking()
    createPolarityRanking()
    polarizationForAllForums()
    forumContributionRanking()
    writeFooter(rankingsFile)
    print()
    print("Successfully generated HTML report.")
[
8,
0,
0.0057,
0.0096,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
14,
0,
0.0172,
0.0019,
0,
0.66,
0.0238,
913,
1,
0,
0,
0,
0,
3,
0
],
[
14,
0,
0.0192,
0.0019,
0,
0.66... | [
"'''\nCreated on 4 avr. 2013\n\n@author: Nils Amiet\n'''",
"graph1Path = \"graph1.gml\"",
"graph2Path = \"graph2.gml\"",
"rankingsFile = \"rankings.html\"",
"import networkx as nx",
"import networkx.readwrite.gml as gml",
"import operator",
"import math",
"import copy",
"import os",
"import sys"... |
'''
Created on 8 avr. 2013
@author: Nils Amiet
'''
# coding: utf-8
class HTMLTableGenerator:
    """Renders rankings as HTML fragments (header, footer, headings, tables).

    str(generator) renders a table whose first column links to the user's
    infrarouge profile and is followed by a resolved-username column;
    raw() renders a plain table without any user decoration.
    """

    infrarougeUserURL = "http://www.infrarouge.ch/ir/member-"
    tableBorder = 1

    def __init__(self, th=None, rows=None, title="Untitled page", users=None):
        """@param th: column headings
        @param rows: sequences of cell values, one per table row
        @param title: page title used by header()
        @param users: rows whose first two fields are (user id, username),
            used to resolve usernames in __repr__

        BUGFIX: th and rows previously used mutable default arguments
        ([]), which are shared between every instance created without
        explicit values.
        """
        self.th = [] if th is None else th
        self.rows = [] if rows is None else rows
        self.title = title
        self.users = users

    def header(self):
        """Return the opening of the HTML document (doctype through <body>)."""
        return "".join([
            "<!DOCTYPE html>", "\n",
            "<html>", "\n",
            "<head>", "<title>", str(self.title), "</title>", "</head>", "\n",
            "<body>", "\n",
        ])

    def footer(self):
        """Return the closing </body></html> tags."""
        return "</body>" + "\n" + "</html>"

    def h1(self, text):
        """Return a section heading element."""
        return "<h1>" + text + "</h1>"

    def _headerRow(self):
        # Shared <tr> of <th> cells for both table renderers.
        cells = "".join("<th>" + str(heading) + "</th>" for heading in self.th)
        return "<tr>" + cells + "</tr>" + "\n"

    def raw(self):
        """Return the table as plain HTML, without user links or usernames."""
        html = "<table border=\"" + str(self.tableBorder) + "\">" + "\n"
        html += self._headerRow()
        for row in self.rows:
            html += "<tr>"
            for col in row:
                html += "<td>" + str(col) + "</td>"
            html += "</tr>" + "\n"
        html += "</table>" + "\n" + "<br />" + "\n"
        return html

    def _username(self, userID):
        # Resolve a user id against self.users; degrade gracefully when
        # the id is unknown or no user rows were supplied.
        try:
            return [user for user in self.users if user[0] == userID][0][1]
        except (TypeError, IndexError):
            return "username not found"

    def __repr__(self):
        """Return the table as HTML, linking the first column of each row to
        the user's profile page and inserting a username column after it."""
        html = "<table border=\"" + str(self.tableBorder) + "\">" + "\n"
        html += self._headerRow()
        for row in self.rows:
            html += "<tr>"
            for colID, col in enumerate(row):
                if colID == 0:
                    html += ("<td>" + "<a href=\"" + self.infrarougeUserURL
                             + str(col) + "\">" + str(col) + "</a>" + "</td>")
                    html += "<td>" + self._username(col) + "</td>"
                else:
                    html += "<td>" + str(col) + "</td>"
            html += "</tr>" + "\n"
        html += "</table>" + "\n" + "<br />" + "\n"
        return html
| [
[
8,
0,
0.0263,
0.0439,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
3,
0,
0.5395,
0.9298,
0,
0.66,
1,
408,
0,
6,
0,
0,
0,
0,
8
],
[
14,
1,
0.0965,
0.0088,
1,
0.97,
... | [
"'''\nCreated on 8 avr. 2013\n\n@author: Nils Amiet\n'''",
"class HTMLTableGenerator:\n \n infrarougeUserURL = \"http://www.infrarouge.ch/ir/member-\"\n tableBorder = 1\n \n def __init__(self, th=[], rows=[], title=\"Untitled page\", users=None):\n self.th = th\n self.rows = rows",
... |
'''
Module which brings history information about files from Mercurial.
@author: Rodrigo Damazio
'''
import re
import subprocess
REVISION_REGEX = re.compile(r'(?P<hash>[0-9a-f]{12}):.*')
def _GetOutputLines(args):
  '''
  Runs an external process and returns its output as a list of lines.

  @param args: the arguments to run
  '''
  proc = subprocess.Popen(
      args, stdout=subprocess.PIPE, universal_newlines=True, shell=False)
  stdout_data, _ = proc.communicate()
  return stdout_data.splitlines()
def FillMercurialRevisions(filename, parsed_file):
  '''
  Fills the revs attribute of all strings in the given parsed file with
  a list of revisions that touched the lines corresponding to that string.

  @param filename: the name of the file to get history for
  @param parsed_file: the parsed file to modify
  @raise ValueError: if a line of hg output cannot be parsed
  '''
  # Take output of hg annotate to get revision of each line
  output_lines = _GetOutputLines(['hg', 'annotate', '-c', filename])

  # Create a map of line -> revision (key is list index, line 0 doesn't exist)
  line_revs = ['dummy']
  for line in output_lines:
    rev_match = REVISION_REGEX.match(line)
    if not rev_match:
      # BUGFIX: raising a plain string is a TypeError on modern Python
      # (string exceptions were removed); raise a real exception instead.
      raise ValueError('Unexpected line of output from hg: %s' % line)
    line_revs.append(rev_match.group('hash'))

  # Renamed from 'str' to avoid shadowing the builtin.
  for str_entry in parsed_file.itervalues():
    # Get the lines that correspond to each string
    start_line = str_entry['startLine']
    end_line = str_entry['endLine']

    # Get the revisions that touched those lines
    revs = [line_revs[line_number]
            for line_number in range(start_line, end_line + 1)]

    # Merge with any revisions that were already there
    # (for explict revision specification)
    if 'revs' in str_entry:
      revs += str_entry['revs']

    # Assign the revisions to the string
    str_entry['revs'] = frozenset(revs)
def DoesRevisionSuperceed(filename, rev1, rev2):
  '''
  Tells whether a revision superceeds another.

  This essentially means that the older revision is an ancestor of the newer
  one.  This also returns True if the two revisions are the same.

  @param rev1: the revision that may be superceeding the other
  @param rev2: the revision that may be superceeded
  @return: True if rev1 superceeds rev2 or they're the same
  '''
  if rev1 == rev2:
    return True
  ancestor_hashes = _GetOutputLines(
      ['hg', 'log', '-r', 'ancestors(%s)' % rev1,
       '--template', '{node|short}\n', filename])
  return rev2 in ancestor_hashes
def NewestRevision(filename, rev1, rev2):
  '''
  Returns which of two revisions is closest to the head of the repository.
  If none of them is the ancestor of the other, then we return either one.

  @param rev1: the first revision
  @param rev2: the second revision
  '''
  return rev1 if DoesRevisionSuperceed(filename, rev1, rev2) else rev2
[
8,
0,
0.0319,
0.0532,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.0745,
0.0106,
0,
0.66,
0.1429,
540,
0,
1,
0,
0,
540,
0,
0
],
[
1,
0,
0.0851,
0.0106,
0,
0.66... | [
"'''\nModule which brings history information about files from Mercurial.\n\n@author: Rodrigo Damazio\n'''",
"import re",
"import subprocess",
"REVISION_REGEX = re.compile(r'(?P<hash>[0-9a-f]{12}):.*')",
"def _GetOutputLines(args):\n '''\n Runs an external process and returns its output as a list of lines... |
'''
Module which compares languague files to the master file and detects
issues.
@author: Rodrigo Damazio
'''
import os
from mytracks.parser import StringsParser
import mytracks.history
class Validator(object):
  # Validates the translated strings.xml files against the English master:
  # detects keys missing on either side and translations whose last
  # Mercurial revision predates the master's (outdated).

  def __init__(self, languages):
    '''
    Builds a strings file validator.

    Params:

    @param languages: a dictionary mapping each language to its corresponding directory
    '''
    self._langs = {}  # language code -> parsed strings file
    self._master = None  # parsed English master file
    self._language_paths = languages

    parser = StringsParser()
    for lang, lang_dir in languages.iteritems():
      filename = os.path.join(lang_dir, 'strings.xml')
      parsed_file = parser.Parse(filename)
      # Attach the Mercurial revisions that touched each string's lines.
      mytracks.history.FillMercurialRevisions(filename, parsed_file)

      if lang == 'en':
        self._master = parsed_file
      else:
        self._langs[lang] = parsed_file

    self._Reset()

  def Validate(self):
    '''
    Computes whether all the data in the files for the given languages is valid.
    '''
    self._Reset()
    self._ValidateMissingKeys()
    self._ValidateOutdatedKeys()

  def valid(self):
    # True when the last Validate() run found no problems at all.
    return (len(self._missing_in_master) == 0 and
            len(self._missing_in_lang) == 0 and
            len(self._outdated_in_lang) == 0)

  def missing_in_master(self):
    # Map of language -> keys present in that language but not in master.
    return self._missing_in_master

  def missing_in_lang(self):
    # Map of language -> keys present in master but not in that language.
    return self._missing_in_lang

  def outdated_in_lang(self):
    # Map of language -> keys whose translation is older than the master.
    return self._outdated_in_lang

  def _Reset(self):
    # These are maps from language to string name list
    self._missing_in_master = {}
    self._missing_in_lang = {}
    self._outdated_in_lang = {}

  def _ValidateMissingKeys(self):
    '''
    Computes whether there are missing keys on either side.
    '''
    master_keys = frozenset(self._master.iterkeys())
    for lang, file in self._langs.iteritems():
      keys = frozenset(file.iterkeys())
      missing_in_master = keys - master_keys
      missing_in_lang = master_keys - keys
      if len(missing_in_master) > 0:
        self._missing_in_master[lang] = missing_in_master
      if len(missing_in_lang) > 0:
        self._missing_in_lang[lang] = missing_in_lang

  def _ValidateOutdatedKeys(self):
    '''
    Computes whether any of the language keys are outdated with relation to the
    master keys.
    '''
    for lang, file in self._langs.iteritems():
      outdated = []
      for key, str in file.iteritems():
        # Get all revisions that touched master and language files for this
        # string.
        # NOTE(review): raises KeyError when 'key' exists only in this
        # language; _ValidateMissingKeys records that case but does not
        # shield this loop — confirm intended behavior.
        master_str = self._master[key]
        master_revs = master_str['revs']
        lang_revs = str['revs']
        if not master_revs or not lang_revs:
          print 'WARNING: No revision for %s in %s' % (key, lang)
          continue

        master_file = os.path.join(self._language_paths['en'], 'strings.xml')
        lang_file = os.path.join(self._language_paths[lang], 'strings.xml')

        # Assume that the repository has a single head (TODO: check that),
        # and as such there is always one revision which superceeds all others.
        master_rev = reduce(
            lambda r1, r2: mytracks.history.NewestRevision(master_file, r1, r2),
            master_revs)
        lang_rev = reduce(
            lambda r1, r2: mytracks.history.NewestRevision(lang_file, r1, r2),
            lang_revs)

        # If the master version is newer than the lang version
        if mytracks.history.DoesRevisionSuperceed(lang_file, master_rev, lang_rev):
          outdated.append(key)

      if len(outdated) > 0:
        self._outdated_in_lang[lang] = outdated
| [
[
8,
0,
0.0304,
0.0522,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.0696,
0.0087,
0,
0.66,
0.25,
688,
0,
1,
0,
0,
688,
0,
0
],
[
1,
0,
0.0783,
0.0087,
0,
0.66,
... | [
"'''\nModule which compares languague files to the master file and detects\nissues.\n\n@author: Rodrigo Damazio\n'''",
"import os",
"from mytracks.parser import StringsParser",
"import mytracks.history",
"class Validator(object):\n\n def __init__(self, languages):\n '''\n Builds a strings file valida... |
#!/usr/bin/python
'''
Entry point for My Tracks i18n tool.
@author: Rodrigo Damazio
'''
import mytracks.files
import mytracks.translate
import mytracks.validate
import sys
def Usage():
  # Print command-line usage and terminate with a non-zero exit status.
  # NOTE(review): 'cleanup' is advertised here but is not handled by the
  # command dispatch in the __main__ block below — confirm.
  print 'Usage: %s <command> [<language> ...]\n' % sys.argv[0]
  print 'Commands are:'
  print ' cleanup'
  print ' translate'
  print ' validate'
  sys.exit(1)
def Translate(languages):
  '''
  Asks the user to interactively translate any missing or oudated strings from
  the files for the given languages.

  @param languages: the languages to translate
  '''
  validator = mytracks.validate.Validator(languages)
  validator.Validate()
  missing = validator.missing_in_lang()
  outdated = validator.outdated_in_lang()

  for lang in languages:
    # BUGFIX: 'missing' values are (frozen)sets while 'outdated' values are
    # lists, so 'missing[lang] + outdated[lang]' raised TypeError; a fully
    # translated language also raised KeyError because neither dict has an
    # entry for it.  Normalize both sides to lists and default to empty.
    untranslated = list(missing.get(lang, [])) + list(outdated.get(lang, []))
    if len(untranslated) == 0:
      continue

    translator = mytracks.translate.Translator(lang)
    translator.Translate(untranslated)
def Validate(languages):
  '''
  Computes and displays errors in the string files for the given languages.

  @param languages: the languages to compute for
  @return: the total number of errors found
  '''
  validator = mytracks.validate.Validator(languages)
  validator.Validate()

  error_count = 0
  if (validator.valid()):
    print 'All files OK'
  else:
    # Report each category of problem and accumulate the total count.
    for lang, missing in validator.missing_in_master().iteritems():
      print 'Missing in master, present in %s: %s:' % (lang, str(missing))
      error_count = error_count + len(missing)
    for lang, missing in validator.missing_in_lang().iteritems():
      print 'Missing in %s, present in master: %s:' % (lang, str(missing))
      error_count = error_count + len(missing)
    for lang, outdated in validator.outdated_in_lang().iteritems():
      print 'Outdated in %s: %s:' % (lang, str(outdated))
      error_count = error_count + len(outdated)
  return error_count
if __name__ == '__main__':
argv = sys.argv
argc = len(argv)
if argc < 2:
Usage()
languages = mytracks.files.GetAllLanguageFiles()
if argc == 3:
langs = set(argv[2:])
if not langs.issubset(languages):
raise 'Language(s) not found'
# Filter just to the languages specified
languages = dict((lang, lang_file)
for lang, lang_file in languages.iteritems()
if lang in langs or lang == 'en' )
cmd = argv[1]
if cmd == 'translate':
Translate(languages)
elif cmd == 'validate':
error_count = Validate(languages)
else:
Usage()
error_count = 0
print '%d errors found.' % error_count
| [
[
8,
0,
0.0417,
0.0521,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.0833,
0.0104,
0,
0.66,
0.125,
640,
0,
1,
0,
0,
640,
0,
0
],
[
1,
0,
0.0938,
0.0104,
0,
0.66,... | [
"'''\nEntry point for My Tracks i18n tool.\n\n@author: Rodrigo Damazio\n'''",
"import mytracks.files",
"import mytracks.translate",
"import mytracks.validate",
"import sys",
"def Usage():\n print('Usage: %s <command> [<language> ...]\\n' % sys.argv[0])\n print('Commands are:')\n print(' cleanup')\n p... |
'''
Module for dealing with resource files (but not their contents).
@author: Rodrigo Damazio
'''
import os.path
from glob import glob
import re
# Resource layout of the Android project: master strings live in 'values',
# translations in 'values-<lang>' directories.
MYTRACKS_RES_DIR = 'MyTracks/res'
ANDROID_MASTER_VALUES = 'values'
ANDROID_VALUES_MASK = 'values-*'
def GetMyTracksDir():
  '''
  Returns the directory in which the MyTracks directory is located.

  Walks upwards from the current working directory until a directory
  containing MyTracks/res is found.

  @raise RuntimeError: if no ancestor directory contains MyTracks/res
  '''
  path = os.getcwd()
  while not os.path.isdir(os.path.join(path, MYTRACKS_RES_DIR)):
    parent = os.path.split(path)[0]
    # BUGFIX: raising a plain string is a TypeError on modern Python; the
    # old "path == '/'" root test also loops forever on Windows drive
    # roots.  Detect the root portably as the fixed point of os.path.split.
    if parent == path:
      raise RuntimeError('Not in My Tracks project')
    # Go up one level
    path = parent
  return path
def GetAllLanguageFiles():
  '''
  Returns a mapping from all found languages to their respective directories.

  The English master directory ('values') is included under the key 'en'.

  @raise RuntimeError: if no translation directories or no master directory
      can be found
  '''
  mytracks_path = GetMyTracksDir()
  res_dir = os.path.join(mytracks_path, MYTRACKS_RES_DIR, ANDROID_VALUES_MASK)
  language_dirs = glob(res_dir)
  master_dir = os.path.join(mytracks_path, MYTRACKS_RES_DIR, ANDROID_MASTER_VALUES)

  # BUGFIX: these raised plain strings, a TypeError on modern Python.
  if len(language_dirs) == 0:
    raise RuntimeError('No languages found!')
  if not os.path.isdir(master_dir):
    raise RuntimeError('Couldn\'t find master file')

  # Extract the language code from each 'values-<lang>' directory name.
  language_tuples = [(re.findall(r'.*values-([A-Za-z-]+)', dir)[0], dir)
                     for dir in language_dirs]
  language_tuples.append(('en', master_dir))
  return dict(language_tuples)
| [
[
8,
0,
0.0667,
0.1111,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.1333,
0.0222,
0,
0.66,
0.125,
79,
0,
1,
0,
0,
79,
0,
0
],
[
1,
0,
0.1556,
0.0222,
0,
0.66,
... | [
"'''\nModule for dealing with resource files (but not their contents).\n\n@author: Rodrigo Damazio\n'''",
"import os.path",
"from glob import glob",
"import re",
"MYTRACKS_RES_DIR = 'MyTracks/res'",
"ANDROID_MASTER_VALUES = 'values'",
"ANDROID_VALUES_MASK = 'values-*'",
"def GetMyTracksDir():\n '''\n... |
'''
Module which parses a string XML file.
@author: Rodrigo Damazio
'''
from xml.parsers.expat import ParserCreate
import re
#import xml.etree.ElementTree as ET
class StringsParser(object):
'''
Parser for string XML files.
This object is not thread-safe and should be used for parsing a single file at
a time, only.
'''
def Parse(self, file):
'''
Parses the given file and returns a dictionary mapping keys to an object
with attributes for that key, such as the value, start/end line and explicit
revisions.
In addition to the standard XML format of the strings file, this parser
supports an annotation inside comments, in one of these formats:
<!-- KEEP_PARENT name="bla" -->
<!-- KEEP_PARENT name="bla" rev="123456789012" -->
Such an annotation indicates that we're explicitly inheriting form the
master file (and the optional revision says that this decision is compatible
with the master file up to that revision).
@param file: the name of the file to parse
'''
self._Reset()
# Unfortunately expat is the only parser that will give us line numbers
self._xml_parser = ParserCreate()
self._xml_parser.StartElementHandler = self._StartElementHandler
self._xml_parser.EndElementHandler = self._EndElementHandler
self._xml_parser.CharacterDataHandler = self._CharacterDataHandler
self._xml_parser.CommentHandler = self._CommentHandler
file_obj = open(file)
self._xml_parser.ParseFile(file_obj)
file_obj.close()
return self._all_strings
def _Reset(self):
self._currentString = None
self._currentStringName = None
self._currentStringValue = None
self._all_strings = {}
def _StartElementHandler(self, name, attrs):
if name != 'string':
return
if 'name' not in attrs:
return
assert not self._currentString
assert not self._currentStringName
self._currentString = {
'startLine' : self._xml_parser.CurrentLineNumber,
}
if 'rev' in attrs:
self._currentString['revs'] = [attrs['rev']]
self._currentStringName = attrs['name']
self._currentStringValue = ''
def _EndElementHandler(self, name):
if name != 'string':
return
assert self._currentString
assert self._currentStringName
self._currentString['value'] = self._currentStringValue
self._currentString['endLine'] = self._xml_parser.CurrentLineNumber
self._all_strings[self._currentStringName] = self._currentString
self._currentString = None
self._currentStringName = None
self._currentStringValue = None
def _CharacterDataHandler(self, data):
if not self._currentString:
return
self._currentStringValue += data
_KEEP_PARENT_REGEX = re.compile(r'\s*KEEP_PARENT\s+'
r'name\s*=\s*[\'"]?(?P<name>[a-z0-9_]+)[\'"]?'
r'(?:\s+rev=[\'"]?(?P<rev>[0-9a-f]{12})[\'"]?)?\s*',
re.MULTILINE | re.DOTALL)
def _CommentHandler(self, data):
keep_parent_match = self._KEEP_PARENT_REGEX.match(data)
if not keep_parent_match:
return
name = keep_parent_match.group('name')
self._all_strings[name] = {
'keepParent' : True,
'startLine' : self._xml_parser.CurrentLineNumber,
'endLine' : self._xml_parser.CurrentLineNumber
}
rev = keep_parent_match.group('rev')
if rev:
self._all_strings[name]['revs'] = [rev] | [
[
8,
0,
0.0261,
0.0435,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.0609,
0.0087,
0,
0.66,
0.3333,
573,
0,
1,
0,
0,
573,
0,
0
],
[
1,
0,
0.0696,
0.0087,
0,
0.66... | [
"'''\nModule which parses a string XML file.\n\n@author: Rodrigo Damazio\n'''",
"from xml.parsers.expat import ParserCreate",
"import re",
"class StringsParser(object):\n '''\n Parser for string XML files.\n\n This object is not thread-safe and should be used for parsing a single file at\n a time, only.\n... |
'''
Module which prompts the user for translations and saves them.
TODO: implement
@author: Rodrigo Damazio
'''
class Translator(object):
'''
classdocs
'''
def __init__(self, language):
'''
Constructor
'''
self._language = language
def Translate(self, string_names):
print string_names | [
[
8,
0,
0.1905,
0.3333,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
3,
0,
0.7143,
0.619,
0,
0.66,
1,
229,
0,
2,
0,
0,
186,
0,
1
],
[
8,
1,
0.5238,
0.1429,
1,
0.56,
... | [
"'''\nModule which prompts the user for translations and saves them.\n\nTODO: implement\n\n@author: Rodrigo Damazio\n'''",
"class Translator(object):\n '''\n classdocs\n '''\n\n def __init__(self, language):\n '''\n Constructor",
" '''\n classdocs\n '''",
" def __init__(self, language):\n '''... |
#!/usr/bin/env python
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import os
from StringIO import StringIO
from PIL import Image
import datauri
RGBA_BLACK = (0, 0, 0, 255)
sign_ = lambda n: -1 if n < 0 else (1 if n > 0 else 0)
def find_black_region_(im, sx, sy, ex, ey):
dx = sign_(ex - sx)
dy = sign_(ey - sy)
if abs(dx) == abs(dy):
raise 'findRegion_ can\'t look both horizontally and vertically at once.'
pixel_changes = []
pixel_on = False
x = sx
y = sy
while True:
if not pixel_on and im.getpixel((x, y)) == RGBA_BLACK:
pixel_changes.append((x, y))
pixel_on = True
elif pixel_on and im.getpixel((x, y)) != RGBA_BLACK:
pixel_changes.append((x, y))
pixel_on = False
x += dx
y += dy
if x == ex and y == ey:
break
return (pixel_changes[0][0 if dx else 1] - (sx if dx else sy),
pixel_changes[1][0 if dx else 1] - (sx if dx else sy))
def image_to_data_uri_(im):
f = StringIO()
im.save(f, 'PNG')
uri = datauri.to_data_uri(f.getvalue(), 'foo.png')
f.close()
return uri
def main():
src_im = Image.open(sys.argv[1])
# read and parse 9-patch stretch and padding regions
stretch_l, stretch_r = find_black_region_(src_im, 0, 0, src_im.size[0], 0)
stretch_t, stretch_b = find_black_region_(src_im, 0, 0, 0, src_im.size[1])
pad_l, pad_r = find_black_region_(src_im, 0, src_im.size[1] - 1, src_im.size[0], src_im.size[1] - 1)
pad_t, pad_b = find_black_region_(src_im, src_im.size[0] - 1, 0, src_im.size[0] - 1, src_im.size[1])
#padding_box = {}
template_params = {}
template_params['id'] = sys.argv[1]
template_params['icon_uri'] = image_to_data_uri_(src_im)
template_params['dim_constraint_attributes'] = '' # p:lockHeight="true"
template_params['image_uri'] = image_to_data_uri_(src_im.crop((1, 1, src_im.size[0] - 1, src_im.size[1] - 1)))
template_params['width_l'] = stretch_l - 1
template_params['width_r'] = src_im.size[0] - stretch_r - 1
template_params['height_t'] = stretch_t - 1
template_params['height_b'] = src_im.size[1] - stretch_b - 1
template_params['pad_l'] = pad_l - 1
template_params['pad_t'] = pad_t - 1
template_params['pad_r'] = src_im.size[0] - pad_r - 1
template_params['pad_b'] = src_im.size[1] - pad_b - 1
print open('res/shape_9patch_template.xml').read() % template_params
if __name__ == '__main__':
main() | [
[
1,
0,
0.1753,
0.0103,
0,
0.66,
0,
509,
0,
1,
0,
0,
509,
0,
0
],
[
1,
0,
0.1856,
0.0103,
0,
0.66,
0.1,
688,
0,
1,
0,
0,
688,
0,
0
],
[
1,
0,
0.1959,
0.0103,
0,
0.6... | [
"import sys",
"import os",
"from StringIO import StringIO",
"from PIL import Image",
"import datauri",
"RGBA_BLACK = (0, 0, 0, 255)",
"sign_ = lambda n: -1 if n < 0 else (1 if n > 0 else 0)",
"def find_black_region_(im, sx, sy, ex, ey):\n dx = sign_(ex - sx)\n dy = sign_(ey - sy)\n if abs(dx) == ab... |
#!/usr/bin/env python
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import os
from StringIO import StringIO
from PIL import Image
import datauri
def image_to_data_uri_(im):
f = StringIO()
im.save(f, 'PNG')
uri = datauri.to_data_uri(f.getvalue(), 'foo.png')
f.close()
return uri
def main():
src_im = Image.open(sys.argv[1])
template_params = {}
template_params['id'] = sys.argv[1]
template_params['image_uri'] = image_to_data_uri_(src_im)
template_params['icon_uri'] = image_to_data_uri_(src_im)
template_params['width'] = src_im.size[0]
template_params['height'] = src_im.size[1]
print open('res/shape_png_template.xml').read() % template_params
if __name__ == '__main__':
main() | [
[
1,
0,
0.3542,
0.0208,
0,
0.66,
0,
509,
0,
1,
0,
0,
509,
0,
0
],
[
1,
0,
0.375,
0.0208,
0,
0.66,
0.1429,
688,
0,
1,
0,
0,
688,
0,
0
],
[
1,
0,
0.3958,
0.0208,
0,
0... | [
"import sys",
"import os",
"from StringIO import StringIO",
"from PIL import Image",
"import datauri",
"def image_to_data_uri_(im):\n f = StringIO()\n im.save(f, 'PNG')\n uri = datauri.to_data_uri(f.getvalue(), 'foo.png')\n f.close()\n return uri",
" f = StringIO()",
" im.save(f, 'PNG')",
" ... |
#!/usr/bin/env python
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import os
import os.path
import shutil
import zipfile
def main():
params = {}
params['id'] = sys.argv[1]
params['displayname'] = sys.argv[2]
params['description'] = sys.argv[3]
zip_file = zipfile.ZipFile('dist/stencil-%s.zip' % params['id'], 'w',
zipfile.ZIP_DEFLATED)
# save stencil XML
shapes_xml = ''
shapes_folder = 'res/sets/%s/shapes' % params['id']
for shape_file in os.listdir(shapes_folder):
if not shape_file.endswith('.xml'):
continue
shape_xml = open(os.path.join(shapes_folder, shape_file)).read()
shapes_xml += shape_xml
params['shapes'] = shapes_xml
final_xml = open('res/stencil_template.xml').read() % params
zip_file.writestr('Definition.xml', final_xml)
# save icons
icons_folder = 'res/sets/%s/icons' % params['id']
for icon_file in os.listdir(icons_folder):
if not icon_file.endswith('.png'):
continue
zip_file.writestr(
'icons/%s' % icon_file,
open(os.path.join(icons_folder, icon_file), 'rb').read())
zip_file.close()
if __name__ == '__main__':
main() | [
[
1,
0,
0.2881,
0.0169,
0,
0.66,
0,
509,
0,
1,
0,
0,
509,
0,
0
],
[
1,
0,
0.3051,
0.0169,
0,
0.66,
0.1667,
688,
0,
1,
0,
0,
688,
0,
0
],
[
1,
0,
0.322,
0.0169,
0,
0... | [
"import sys",
"import os",
"import os.path",
"import shutil",
"import zipfile",
"def main():\n params = {}\n params['id'] = sys.argv[1]\n params['displayname'] = sys.argv[2]\n params['description'] = sys.argv[3]\n\n zip_file = zipfile.ZipFile('dist/stencil-%s.zip' % params['id'], 'w',\n zipfile.... |
#!/usr/bin/env python
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import sys
import mimetypes
def to_data_uri(data, file_name):
'''Takes a file object and returns its data: string.'''
mime_type = mimetypes.guess_type(file_name)
return 'data:%(mimetype)s;base64,%(data)s' % dict(mimetype=mime_type[0],
data=base64.b64encode(data))
def main():
print to_data_uri(open(sys.argv[1], 'rb').read(), sys.argv[1])
if __name__ == '__main__':
main() | [
[
1,
0,
0.5,
0.0294,
0,
0.66,
0,
177,
0,
1,
0,
0,
177,
0,
0
],
[
1,
0,
0.5294,
0.0294,
0,
0.66,
0.2,
509,
0,
1,
0,
0,
509,
0,
0
],
[
1,
0,
0.5588,
0.0294,
0,
0.66,
... | [
"import base64",
"import sys",
"import mimetypes",
"def to_data_uri(data, file_name):\n '''Takes a file object and returns its data: string.'''\n mime_type = mimetypes.guess_type(file_name)\n return 'data:%(mimetype)s;base64,%(data)s' % dict(mimetype=mime_type[0],\n data=base64.b64encode(data))",
" ... |
# -*- coding: UTF-8 -*-
from google.appengine.ext import deferred
from notifiy import email
from notifiy import model
from notifiy import phone
from notifiy import templates
def notify_created(wavelet, blip, modified_by):
"""Sends a created notification to all participants except the modified_by"""
for participant in wavelet.participants:
if participant == modified_by: continue
notify_participant(participant, wavelet, modified_by, blip, blip.text)
def notify_submitted(wavelet, blip, modified_by, message=None):
"""Sends a submitted notification to all participants except the modified_by"""
for participant in wavelet.participants:
if participant == modified_by: continue
notify_participant(participant, wavelet, modified_by, blip,
message or (blip and blip.text) or '[no content]')
def notify_removed(wavelet, modified_by):
"""Sends a deleted notification to all participants except the modified_by"""
for participant in wavelet.participants:
if participant == modified_by: continue
notify_participant(participant, wavelet, modified_by,
wavelet.root_blip, templates.CONTENT_DELETED)
def notify_participant(participant, wavelet, modified_by, blip, message):
deferred.defer(notify_participant_deferred,
participant=participant,
modified_by=modified_by,
title=wavelet.title,
wave_id=wavelet.wave_id,
wavelet_id=wavelet.wavelet_id,
blip_id=blip and blip.blip_id or '',
message=message,
_queue='notify-participant')
def notify_participant_deferred(participant, modified_by, title, wave_id, wavelet_id, blip_id, message):
"""Sends a notification to the participant"""
pp = model.ParticipantPreferences.get_by_pk(participant)
if not pp or not pp.notify: return
pwp = model.ParticipantWavePreferences.get_by_pk(participant, wave_id)
if not pwp or pwp.notify_type == model.NOTIFY_NONE: return
if pwp.notify_type == model.NOTIFY_ONCE:
if not pwp.visited: return
message = templates.NOTIFY_ONCE_TEMPLATE % message
pwp.visited = False
email.send_message(pwp, modified_by, title, wave_id, wavelet_id, blip_id, message)
phone.send_message(pwp, modified_by, title, wave_id, wavelet_id, blip_id, message)
| [
[
1,
0,
0.0469,
0.0156,
0,
0.66,
0,
167,
0,
1,
0,
0,
167,
0,
0
],
[
1,
0,
0.0781,
0.0156,
0,
0.66,
0.1111,
36,
0,
1,
0,
0,
36,
0,
0
],
[
1,
0,
0.0938,
0.0156,
0,
0.... | [
"from google.appengine.ext import deferred",
"from notifiy import email",
"from notifiy import model",
"from notifiy import phone",
"from notifiy import templates",
"def notify_created(wavelet, blip, modified_by):\n \"\"\"Sends a created notification to all participants except the modified_by\"\"\"\n\n... |
# -*- coding: UTF-8 -*-
from google.appengine.ext import db
from google.appengine.ext import deferred
from notifiy import email
from notifiy import gadget
from notifiy import preferences
from notifiy import templates
from notifiy import model
def wavelet_init(wavelet, modified_by):
"""Initialize the wavelet"""
gadget.gadget_add(wavelet)
for participant in wavelet.participants:
participant_wavelet_init(wavelet, participant, modified_by,
message=templates.ROBOT_ADDED)
def participant_init(wavelet, participant):
"""Initialize the participant and return it"""
pp = model.ParticipantPreferences.get_by_pk(participant)
if pp: return pp
pp = model.ParticipantPreferences.get_by_pk(participant, create=True)
if participant.endswith('@googlewave.com'):
pp.email = '%s@gmail.com' % participant.split('@')[0]
pp.put()
preferences.create_preferences_wave(wavelet.robot, participant)
return pp
# TODO do this deferred
def participant_wavelet_init_deferred(wavelet, participant, modified_by, message):
deferred.defer(participant_wavelet_init_deferred, wavelet, participant,
modified_by, message, _queue='participant-wavelet-init')
def participant_wavelet_init(wavelet, participant, modified_by, message=None):
"""Initialize the participant in the wavelet"""
pwp = model.ParticipantWavePreferences.get_by_pk(participant, wavelet.wave_id)
if pwp: return
pwp = model.ParticipantWavePreferences.get_by_pk(participant, wavelet.wave_id, create=True)
pp = participant_init(wavelet, participant)
if not pp.notify_initial: return
email.send_message(pwp, modified_by, wavelet.title, wavelet.wave_id,
wavelet.wavelet_id, wavelet.root_blip.blip_id, message)
def wavelet_deinit(wavelet):
"""De-initialize the wavelet"""
gadget.gadget_remove(wavelet)
def participant_deinit(wavelet, participant):
"""De-initialize the participant, removes al records available and the preferences wave"""
query = model.ParticipantPreferences.all()
query.filter("participant =", participant)
db.delete(query)
query = model.ParticipantWavePreferences.all()
query.filter("participant =", participant)
db.delete(query)
preferences.delete_preferences_wavelet(wavelet)
| [
[
1,
0,
0.04,
0.0133,
0,
0.66,
0,
167,
0,
1,
0,
0,
167,
0,
0
],
[
1,
0,
0.0533,
0.0133,
0,
0.66,
0.0833,
167,
0,
1,
0,
0,
167,
0,
0
],
[
1,
0,
0.08,
0.0133,
0,
0.66... | [
"from google.appengine.ext import db",
"from google.appengine.ext import deferred",
"from notifiy import email",
"from notifiy import gadget",
"from notifiy import preferences",
"from notifiy import templates",
"from notifiy import model",
"def wavelet_init(wavelet, modified_by):\n \"\"\"Initialize... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.