#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This file is part of web2py Web Framework (Copyrighted, 2007-2009).
Developed by Massimo Di Pierro <mdipierro@cs.depaul.edu> and
Robin B <robi123@gmail.com>.
License: GPL v2
"""
__all__ = ['MEMDB', 'Field']
import re
import sys
import os
import types
import datetime
import thread
import cStringIO
import csv
import copy
import gluon.validators as validators
from gluon.storage import Storage
from gluon import SQLTABLE
import random
SQL_DIALECTS = {'memcache': {
'boolean': bool,
'string': unicode,
'text': unicode,
'password': unicode,
'blob': unicode,
'upload': unicode,
'integer': long,
'double': float,
'date': datetime.date,
'time': datetime.time,
'datetime': datetime.datetime,
'id': int,
'reference': int,
'lower': None,
'upper': None,
'is null': 'IS NULL',
'is not null': 'IS NOT NULL',
'extract': None,
'left join': None,
}}
def cleanup(text):
if re.compile('[^0-9a-zA-Z_]').findall(text):
raise SyntaxError('Can\'t cleanup \'%s\': only [0-9a-zA-Z_] allowed in table and field names' % text)
return text
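# For example (illustrative): cleanup('my_table') returns 'my_table',
# while cleanup('my-table') raises SyntaxError because '-' is not allowed.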
def assert_filter_fields(*fields):
for field in fields:
if isinstance(field, (Field, Expression)) and field.type\
in ['text', 'blob']:
raise SyntaxError('AppEngine does not index by: %s'
% field.type)
def dateobj_to_datetime(object):
# convert dates,times to datetimes for AppEngine
if isinstance(object, datetime.date):
object = datetime.datetime(object.year, object.month,
object.day)
if isinstance(object, datetime.time):
object = datetime.datetime(
1970,
1,
1,
object.hour,
object.minute,
object.second,
object.microsecond,
)
return object
def sqlhtml_validators(field_type, length):
v = {
'boolean': [],
'string': validators.IS_LENGTH(length),
'text': [],
'password': validators.IS_LENGTH(length),
'blob': [],
'upload': [],
'double': validators.IS_FLOAT_IN_RANGE(-1e100, 1e100),
'integer': validators.IS_INT_IN_RANGE(-1e100, 1e100),
'date': validators.IS_DATE(),
'time': validators.IS_TIME(),
'datetime': validators.IS_DATETIME(),
'reference': validators.IS_INT_IN_RANGE(0, 1e100),
}
try:
return v[field_type[:9]]
except KeyError:
return []
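# For example, sqlhtml_validators('string', 32) returns IS_LENGTH(32),
# while an unrecognized field type falls through to the empty list.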
class DALStorage(dict):
"""
a dictionary that lets you do d['a'] as well as d.a
"""
def __getattr__(self, key):
return self[key]
def __setattr__(self, key, value):
if key in self:
raise SyntaxError(
'Object \'%s\' exists and cannot be redefined' % key)
self[key] = value
def __repr__(self):
return '<DALStorage ' + dict.__repr__(self) + '>'
class SQLCallableList(list):
def __call__(self):
return copy.copy(self)
class MEMDB(DALStorage):
"""
an instance of this class represents a database connection
Example::
db=MEMDB(Client())
db.define_table('tablename',Field('fieldname1'),
Field('fieldname2'))
"""
def __init__(self, client):
self._dbname = 'memdb'
self['_lastsql'] = ''
self.tables = SQLCallableList()
self._translator = SQL_DIALECTS['memcache']
self.client = client
def define_table(
self,
tablename,
*fields,
**args
):
tablename = cleanup(tablename)
if tablename in dir(self) or tablename[0] == '_':
raise SyntaxError('invalid table name: %s' % tablename)
if not tablename in self.tables:
self.tables.append(tablename)
else:
raise SyntaxError('table already defined: %s' % tablename)
t = self[tablename] = Table(self, tablename, *fields)
t._create()
return t
def __call__(self, where=''):
return Set(self, where)
class SQLALL(object):
def __init__(self, table):
self.table = table
class Table(DALStorage):
"""
an instance of this class represents a database table
Example::
db=MEMDB(Client())
db.define_table('users',Field('name'))
db.users.insert(name='me')
"""
def __init__(
self,
db,
tablename,
*fields
):
self._db = db
self._tablename = tablename
self.fields = SQLCallableList()
self._referenced_by = []
fields = list(fields)
fields.insert(0, Field('id', 'id'))
for field in fields:
self.fields.append(field.name)
self[field.name] = field
field._tablename = self._tablename
field._table = self
field._db = self._db
self.ALL = SQLALL(self)
def _create(self):
fields = []
myfields = {}
for k in self.fields:
field = self[k]
attr = {}
if not field.type[:9] in ['id', 'reference']:
if field.notnull:
attr = dict(required=True)
if field.type[:2] == 'id':
continue
if field.type[:9] == 'reference':
referenced = field.type[10:].strip()
if not referenced:
raise SyntaxError('Table %s: reference \'%s\' to nothing!' % (
self._tablename, k))
if not referenced in self._db:
raise SyntaxError(
'Table: table %s does not exist' % referenced)
referee = self._db[referenced]
ftype = \
self._db._translator[field.type[:9]](
self._db[referenced]._tableobj)
if self._tablename in referee.fields: # ## THIS IS OK
raise SyntaxError('Field: table \'%s\' has same name as a field '
'in referenced table \'%s\'' % (
self._tablename, referenced))
self._db[referenced]._referenced_by.append((self._tablename,
field.name))
elif not field.type in self._db._translator\
or not self._db._translator[field.type]:
raise SyntaxError('Field: unknown field type %s' % field.type)
self._tableobj = self._db.client
return None
def create(self):
# nothing to do, here for backward compatibility
pass
def drop(self):
# nothing to do, here for backward compatibility
self._db(self.id > 0).delete()
def insert(self, **fields):
id = self._create_id()
if self.update(id, **fields):
return long(id)
else:
return None
def get(self, id):
val = self._tableobj.get(self._id_to_key(id))
if val:
return Storage(val)
else:
return None
def update(self, id, **fields):
for field in self.fields:
if not field in fields and self[field].default\
is not None:
fields[field] = self[field].default
if field in fields:
fields[field] = obj_represent(fields[field],
self[field].type, self._db)
return self._tableobj.set(self._id_to_key(id), fields)
def delete(self, id):
return self._tableobj.delete(self._id_to_key(id))
def _shard_key(self, shard):
return self._id_to_key('s/%s' % shard)
def _id_to_key(self, id):
return '__memdb__/t/%s/k/%s' % (self._tablename, str(id))
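# ids are sharded to reduce contention on a single memcache counter:
# _create_id below picks a random two-digit shard (10-99), increments
# that shard's counter, and concatenates the two, so e.g. shard 42 with
# a counter value of 7 yields id 427.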
def _create_id(self):
shard = random.randint(10, 99)
shard_id = self._shard_key(shard)
id = self._tableobj.incr(shard_id)
if not id:
if self._tableobj.set(shard_id, '0'):
id = 0
else:
raise Exception('cannot set memcache')
return long(str(shard) + str(id))
def __str__(self):
return self._tablename
class Expression(object):
def __init__(
self,
name,
type='string',
db=None,
):
(self.name, self.type, self._db) = (name, type, db)
def __str__(self):
return self.name
def __or__(self, other): # for use in sortby
assert_filter_fields(self, other)
return Expression(self.name + '|' + other.name, None, None)
def __invert__(self):
assert_filter_fields(self)
return Expression('-' + self.name, self.type, None)
# for use in Query
def __eq__(self, value):
return Query(self, '=', value)
def __ne__(self, value):
return Query(self, '!=', value)
def __lt__(self, value):
return Query(self, '<', value)
def __le__(self, value):
return Query(self, '<=', value)
def __gt__(self, value):
return Query(self, '>', value)
def __ge__(self, value):
return Query(self, '>=', value)
# def like(self,value): return Query(self,' LIKE ',value)
# def belongs(self,value): return Query(self,' IN ',value)
# for use in both Query and sortby
def __add__(self, other):
return Expression('%s+%s' % (self, other), 'float', None)
def __sub__(self, other):
return Expression('%s-%s' % (self, other), 'float', None)
def __mul__(self, other):
return Expression('%s*%s' % (self, other), 'float', None)
def __div__(self, other):
return Expression('%s/%s' % (self, other), 'float', None)
class Field(Expression):
"""
an instance of this class represents a database field
example::
a = Field(name, 'string', length=32, required=False,
default=None, requires=IS_NOT_EMPTY(), notnull=False,
unique=False, uploadfield=True)
to be used as argument of MEMDB.define_table
allowed field types:
string, boolean, integer, double, text, blob,
date, time, datetime, upload, password
strings must have a length (512 by default).
fields should have a default value or they will be required in SQLFORMs
the requires argument is used to validate the field input in SQLFORMs
"""
def __init__(
self,
fieldname,
type='string',
length=None,
default=None,
required=False,
requires=sqlhtml_validators,
ondelete='CASCADE',
notnull=False,
unique=False,
uploadfield=True,
):
self.name = cleanup(fieldname)
if fieldname in dir(Table) or fieldname[0] == '_':
raise SyntaxError('Field: invalid field name: %s' % fieldname)
if isinstance(type, Table):
type = 'reference ' + type._tablename
if not length:
length = 512
self.type = type # 'string', 'integer'
self.length = length # the length of the string
self.default = default # default value for field
self.required = required # is this field required
self.ondelete = ondelete.upper() # this is for reference fields only
self.notnull = notnull
self.unique = unique
self.uploadfield = uploadfield
if requires == sqlhtml_validators:
requires = sqlhtml_validators(type, length)
elif requires is None:
requires = []
self.requires = requires # list of validators
def formatter(self, value):
if value is None or not self.requires:
return value
if not isinstance(self.requires, (list, tuple)):
requires = [self.requires]
else:
requires = copy.copy(self.requires)
requires.reverse()
for item in requires:
if hasattr(item, 'formatter'):
value = item.formatter(value)
return value
def __str__(self):
return '%s.%s' % (self._tablename, self.name)
MEMDB.Field = Field # ## required by gluon/globals.py session.connect
def obj_represent(object, fieldtype, db):
if object is not None:
if fieldtype == 'date' and not isinstance(object,
datetime.date):
(y, m, d) = [int(x) for x in str(object).strip().split('-')]
object = datetime.date(y, m, d)
elif fieldtype == 'time' and not isinstance(object, datetime.time):
time_items = [int(x) for x in str(object).strip().split(':')[:3]]
if len(time_items) == 3:
(h, mi, s) = time_items
else:
(h, mi, s) = time_items + [0]
object = datetime.time(h, mi, s)
elif fieldtype == 'datetime' and not isinstance(object,
datetime.datetime):
(y, m, d) = [int(x) for x in
str(object)[:10].strip().split('-')]
time_items = [int(x) for x in
str(object)[11:].strip().split(':')[:3]]
if len(time_items) == 3:
(h, mi, s) = time_items
else:
(h, mi, s) = time_items + [0]
object = datetime.datetime(
y,
m,
d,
h,
mi,
s,
)
elif fieldtype == 'integer' and not isinstance(object, long):
object = long(object)
return object
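# For example: obj_represent('1971-12-21', 'date', db) returns
# datetime.date(1971, 12, 21), and obj_represent('0', 'integer', db)
# returns 0L (this is the coercion used by Table.update above).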
class QueryException:
def __init__(self, **a):
self.__dict__ = a
class Query(object):
"""
A query object necessary to define a set.
It can be stored or can be passed to MEMDB.__call__() to obtain a Set
Example:
query=db.users.name=='Max'
set=db(query)
records=set.select()
"""
def __init__(
self,
left,
op=None,
right=None,
):
if isinstance(right, (Field, Expression)):
raise SyntaxError(
'Query: right side of filter must be a value or entity')
if isinstance(left, Field) and left.name == 'id':
if op == '=':
self.get_one = \
QueryException(tablename=left._tablename,
id=long(right))
return
else:
raise SyntaxError('only equality by id is supported')
raise SyntaxError('not supported')
def __str__(self):
return str(self.left)
class Set(object):
"""
A Set represents a set of records in the database;
the records are identified by the where=Query(...) object.
normally the Set is generated by MEMDB.__call__(Query(...))
given a set, for example
set=db(db.users.name=='Max')
you can:
set.update(db.users.name='Massimo')
set.delete() # all elements in the set
set.select(orderby=db.users.id,groupby=db.users.name,limitby=(0,10))
and take subsets:
subset=set(db.users.id<5)
"""
def __init__(self, db, where=None):
self._db = db
self._tables = []
self.filters = []
if hasattr(where, 'get_all'):
self.where = where
self._tables.insert(0, where.get_all)
elif hasattr(where, 'get_one') and isinstance(where.get_one,
QueryException):
self.where = where.get_one
else:
# find out which tables are involved
if isinstance(where, Query):
self.filters = where.left
self.where = where
self._tables = [field._tablename for (field, op, val) in
self.filters]
def __call__(self, where):
if isinstance(self.where, QueryException) or isinstance(where,
QueryException):
raise SyntaxError('neither self.where nor where can be a QueryException instance')
if self.where:
return Set(self._db, self.where & where)
else:
return Set(self._db, where)
def _get_table_or_raise(self):
tablenames = list(set(self._tables)) # unique
if len(tablenames) < 1:
raise SyntaxError('Set: no tables selected')
if len(tablenames) > 1:
raise SyntaxError('Set: no join in appengine')
return self._db[tablenames[0]]._tableobj
def _getitem_exception(self):
(tablename, id) = (self.where.tablename, self.where.id)
fields = self._db[tablename].fields
self.colnames = ['%s.%s' % (tablename, t) for t in fields]
item = self._db[tablename].get(id)
return (item, fields, tablename, id)
def _select_except(self):
(item, fields, tablename, id) = self._getitem_exception()
if not item:
return []
new_item = []
for t in fields:
if t == 'id':
new_item.append(long(id))
else:
new_item.append(getattr(item, t))
r = [new_item]
return Rows(self._db, r, *self.colnames)
def select(self, *fields, **attributes):
"""
Always returns a Rows object, even if it may be empty
"""
if isinstance(self.where, QueryException):
return self._select_except()
else:
raise SyntaxError('select arguments not supported')
def count(self):
return len(self.select())
def delete(self):
if isinstance(self.where, QueryException):
(item, fields, tablename, id) = self._getitem_exception()
if not item:
return
self._db[tablename].delete(id)
else:
raise Exception('deletion not implemented')
def update(self, **update_fields):
if isinstance(self.where, QueryException):
(item, fields, tablename, id) = self._getitem_exception()
if not item:
return
for (key, value) in update_fields.items():
setattr(item, key, value)
self._db[tablename].update(id, **item)
else:
raise Exception('update not implemented')
def update_record(
t,
s,
id,
a,
):
item = s.get(id)
for (key, value) in a.items():
t[key] = value
setattr(item, key, value)
s.update(id, **item)
class Rows(object):
"""
A wrapper for the return value of a select. It basically represents a table.
It has an iterator and each row is represented as a dictionary.
"""
# ## this class still needs some work to care for ID/OID
def __init__(
self,
db,
response,
*colnames
):
self._db = db
self.colnames = colnames
self.response = response
def __len__(self):
return len(self.response)
def __getitem__(self, i):
if i >= len(self.response) or i < 0:
raise SyntaxError('Rows: no such row: %i' % i)
if len(self.response[0]) != len(self.colnames):
raise SyntaxError('Rows: internal error')
row = DALStorage()
for j in xrange(len(self.colnames)):
value = self.response[i][j]
if isinstance(value, unicode):
value = value.encode('utf-8')
packed = self.colnames[j].split('.')
try:
(tablename, fieldname) = packed
except:
if not '_extra' in row:
row['_extra'] = DALStorage()
row['_extra'][self.colnames[j]] = value
continue
table = self._db[tablename]
field = table[fieldname]
if not tablename in row:
row[tablename] = DALStorage()
if field.type[:9] == 'reference':
referee = field.type[10:].strip()
rid = value
row[tablename][fieldname] = rid
elif field.type == 'boolean' and value is not None:
# row[tablename][fieldname]=Set(self._db[referee].id==rid)
if value == True or value == 'T':
row[tablename][fieldname] = True
else:
row[tablename][fieldname] = False
elif field.type == 'date' and value is not None\
and not isinstance(value, datetime.date):
(y, m, d) = [int(x) for x in
str(value).strip().split('-')]
row[tablename][fieldname] = datetime.date(y, m, d)
elif field.type == 'time' and value is not None\
and not isinstance(value, datetime.time):
time_items = [int(x) for x in
str(value).strip().split(':')[:3]]
if len(time_items) == 3:
(h, mi, s) = time_items
else:
(h, mi, s) = time_items + [0]
row[tablename][fieldname] = datetime.time(h, mi, s)
elif field.type == 'datetime' and value is not None\
and not isinstance(value, datetime.datetime):
(y, m, d) = [int(x) for x in
str(value)[:10].strip().split('-')]
time_items = [int(x) for x in
str(value)[11:].strip().split(':')[:3]]
if len(time_items) == 3:
(h, mi, s) = time_items
else:
(h, mi, s) = time_items + [0]
row[tablename][fieldname] = datetime.datetime(
y,
m,
d,
h,
mi,
s,
)
else:
row[tablename][fieldname] = value
if fieldname == 'id':
id = row[tablename].id
row[tablename].update_record = lambda t = row[tablename], \
s = self._db[tablename], id = id, **a: update_record(t,
s, id, a)
for (referee_table, referee_name) in \
table._referenced_by:
s = self._db[referee_table][referee_name]
row[tablename][referee_table] = Set(self._db, s
== id)
if len(row.keys()) == 1:
return row[row.keys()[0]]
return row
def __iter__(self):
"""
iterator over records
"""
for i in xrange(len(self)):
yield self[i]
def __str__(self):
"""
serializes the table into a csv file
"""
s = cStringIO.StringIO()
writer = csv.writer(s)
writer.writerow(self.colnames)
c = len(self.colnames)
for i in xrange(len(self)):
row = [self.response[i][j] for j in xrange(c)]
for k in xrange(c):
if isinstance(row[k], unicode):
row[k] = row[k].encode('utf-8')
writer.writerow(row)
return s.getvalue()
def xml(self):
"""
serializes the table using SQLTABLE (if present)
"""
return SQLTABLE(self).xml()
def test_all():
"""
How to run from web2py dir:
export PYTHONPATH=.:YOUR_PLATFORMS_APPENGINE_PATH
python gluon/contrib/memdb.py
Setup the UTC timezone and database stubs
>>> import os
>>> os.environ['TZ'] = 'UTC'
>>> import time
>>> if hasattr(time, 'tzset'):
... time.tzset()
>>>
>>> from google.appengine.api import apiproxy_stub_map
>>> from google.appengine.api.memcache import memcache_stub
>>> apiproxy_stub_map.apiproxy = apiproxy_stub_map.APIProxyStubMap()
>>> apiproxy_stub_map.apiproxy.RegisterStub('memcache', memcache_stub.MemcacheServiceStub())
Create a table with all possible field types
>>> from google.appengine.api.memcache import Client
>>> db=MEMDB(Client())
>>> tmp=db.define_table('users', Field('stringf','string',length=32,required=True), Field('booleanf','boolean',default=False), Field('passwordf','password',notnull=True), Field('blobf','blob'), Field('uploadf','upload'), Field('integerf','integer',unique=True), Field('doublef','double',unique=True,notnull=True), Field('datef','date',default=datetime.date.today()), Field('timef','time'), Field('datetimef','datetime'), migrate='test_user.table')
Insert a field
>>> user_id = db.users.insert(stringf='a',booleanf=True,passwordf='p',blobf='0A', uploadf=None, integerf=5,doublef=3.14, datef=datetime.date(2001,1,1), timef=datetime.time(12,30,15), datetimef=datetime.datetime(2002,2,2,12,30,15))
>>> user_id != None
True
Select all
# >>> all = db().select(db.users.ALL)
Drop the table
# >>> db.users.drop()
Select many entities
>>> tmp = db.define_table(\"posts\", Field('body','text'), Field('total','integer'), Field('created_at','datetime'))
>>> many = 20 #2010 # more than 1000 single fetch limit (it can be slow)
>>> few = 5
>>> most = many - few
>>> 0 < few < most < many
True
>>> for i in range(many):
... f=db.posts.insert(body='', total=i,created_at=datetime.datetime(2008, 7, 6, 14, 15, 42, i))
>>>
# test timezones
>>> class TZOffset(datetime.tzinfo):
... def __init__(self,offset=0):
... self.offset = offset
... def utcoffset(self, dt): return datetime.timedelta(hours=self.offset)
... def dst(self, dt): return datetime.timedelta(0)
... def tzname(self, dt): return 'UTC' + str(self.offset)
...
>>> SERVER_OFFSET = -8
>>>
>>> stamp = datetime.datetime(2008, 7, 6, 14, 15, 42, 828201)
>>> post_id = db.posts.insert(created_at=stamp,body='body1')
>>> naive_stamp = db(db.posts.id==post_id).select()[0].created_at
>>> utc_stamp=naive_stamp.replace(tzinfo=TZOffset())
>>> server_stamp = utc_stamp.astimezone(TZOffset(SERVER_OFFSET))
>>> stamp == naive_stamp
True
>>> utc_stamp == server_stamp
True
>>> rows = db(db.posts.id==post_id).select()
>>> len(rows) == 1
True
>>> rows[0].body == 'body1'
True
>>> db(db.posts.id==post_id).delete()
>>> rows = db(db.posts.id==post_id).select()
>>> len(rows) == 0
True
>>> id = db.posts.insert(total='0') # coerce str to integer
>>> rows = db(db.posts.id==id).select()
>>> len(rows) == 1
True
>>> rows[0].total == 0
True
Examples of insert, select, update, delete
>>> tmp=db.define_table('person', Field('name'), Field('birth','date'), migrate='test_person.table')
>>> marco_id=db.person.insert(name=\"Marco\",birth='2005-06-22')
>>> person_id=db.person.insert(name=\"Massimo\",birth='1971-12-21')
>>> me=db(db.person.id==person_id).select()[0] # test select
>>> me.name
'Massimo'
>>> db(db.person.id==person_id).update(name='massimo') # test update
>>> me = db(db.person.id==person_id).select()[0]
>>> me.name
'massimo'
>>> str(me.birth)
'1971-12-21'
# resave date to ensure it comes back the same
>>> me=db(db.person.id==person_id).update(birth=me.birth) # test update
>>> me = db(db.person.id==person_id).select()[0]
>>> me.birth
datetime.date(1971, 12, 21)
>>> db(db.person.id==marco_id).delete() # test delete
>>> len(db(db.person.id==marco_id).select())
0
Update a single record
>>> me.update_record(name=\"Max\")
>>> me.name
'Max'
>>> me = db(db.person.id == person_id).select()[0]
>>> me.name
'Max'
"""
SQLField = Field
SQLTable = Table
SQLXorable = Expression
SQLQuery = Query
SQLSet = Set
SQLRows = Rows
SQLStorage = DALStorage
if __name__ == '__main__':
import doctest
doctest.testmod()
# Only Python 2.6 and up, because of collections.namedtuple.
import time
from collections import namedtuple
Score = namedtuple('Score', ['tag', 'stamp'])
class TimeCollector(object):
def __init__(self):
'''The first time stamp is created here'''
self.scores = [Score(tag='start', stamp=time.clock())]
def addStamp(self, description):
'''Adds a new time stamp, with a description.'''
self.scores.append(Score(tag=description, stamp=time.clock()))
def _stampDelta(self, index1, index2):
'''Private utility function to clean up this common calculation.'''
return self.scores[index1].stamp - self.scores[index2].stamp
def getReportItems(self, orderByCost=True):
'''Returns a list of dicts. Each dict has
start (ms),
end (ms),
delta (ms),
perc (%),
tag (str)
'''
self.scores.append(Score(tag='finish', stamp=time.clock()))
total_time = self._stampDelta(-1, 0)
data = []
for i in range(1, len(self.scores)):
delta = self._stampDelta(i, i - 1)
if abs(total_time) < 1e-6:
perc = 0
else:
perc = delta / total_time * 100
data.append(
dict(
start=self._stampDelta(i - 1, 0) * 1000,
end=self._stampDelta(i, 0) * 1000,
delta=delta * 1000,
perc=perc,
tag=self.scores[i].tag
)
)
if orderByCost:
data.sort(key=lambda x: x['perc'], reverse=True)
return data
def getReportLines(self, orderByCost=True):
'''Produces a report of logged time-stamps as a list of strings.
if orderByCost is False, then the order of the stamps is
chronological.'''
data = self.getReportItems(orderByCost)
headerTemplate = '%10s | %10s | %10s | %11s | %-30s'
headerData = ('Start(ms)', 'End(ms)', 'Delta(ms)', 'Time Cost',
'Description')
bodyTemplate = '%(start)10.0f | %(end)10.0f | %(delta)10.0f |' \
+ ' %(perc)10.0f%% | %(tag)-30s'
return [headerTemplate % headerData] + [bodyTemplate % d for d in data]
def getReportText(self, **kwargs):
return '\n'.join(self.getReportLines(**kwargs))
def restart(self):
self.scores = [Score(tag='start', stamp=time.clock())]
if __name__ == '__main__':
print('')
print('Testing:')
print('')
# First create the collector
t = TimeCollector()
x = [i for i in range(1000)]
# Every time some work gets done, add a stamp
t.addStamp('Initialization Section')
x = [i for i in range(10000)]
t.addStamp('A big loop')
x = [i for i in range(100000)]
t.addStamp('calling builder function')
# Finally, obtain the results
print('')
print(t.getReportText())
# If you want to measure something else in the same scope, you can
# restart the collector.
t.restart()
x = [i for i in range(1000000)]
t.addStamp('Part 2')
x = [i for i in range(1000000)]
t.addStamp('Cleanup')
# And once again report results
print('')
print(t.getReportText())
t.restart()
for y in range(1, 200, 20):
x = [i for i in range(10000) * y]
t.addStamp('Iteration when y = ' + str(y))
print('')
# You can turn off ordering of results
print(t.getReportText(orderByCost=False))
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""`cssmin` - A Python port of the YUI CSS compressor."""
"""
Home page: https://github.com/zacharyvoase/cssmin
License: BSD: https://github.com/zacharyvoase/cssmin/blob/master/LICENSE
Original author: Zachary Voase
Modified for inclusion into web2py by: Ross Peoples <ross.peoples@gmail.com>
"""
from StringIO import StringIO # The pure-Python StringIO supports unicode.
import re
__version__ = '0.1.4'
def remove_comments(css):
"""Remove all CSS comment blocks."""
iemac = False
preserve = False
comment_start = css.find("/*")
while comment_start >= 0:
# Preserve comments that look like `/*!...*/`.
# Slicing is used to make sure we don"t get an IndexError.
preserve = css[comment_start + 2:comment_start + 3] == "!"
comment_end = css.find("*/", comment_start + 2)
if comment_end < 0:
if not preserve:
css = css[:comment_start]
break
elif comment_end >= (comment_start + 2):
if css[comment_end - 1] == "\\":
# This is an IE Mac-specific comment; leave this one and the
# following one alone.
comment_start = comment_end + 2
iemac = True
elif iemac:
comment_start = comment_end + 2
iemac = False
elif not preserve:
css = css[:comment_start] + css[comment_end + 2:]
else:
comment_start = comment_end + 2
comment_start = css.find("/*", comment_start)
return css
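# For example (illustrative input):
#   remove_comments("a{}/* note */b{}/*! keep */")
# returns 'a{}b{}/*! keep */' -- the plain comment is dropped, the
# /*!...*/ one is preserved.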
def remove_unnecessary_whitespace(css):
"""Remove unnecessary whitespace characters."""
def pseudoclasscolon(css):
"""
Prevents 'p :link' from becoming 'p:link'.
Translates 'p :link' into 'p ___PSEUDOCLASSCOLON___link'; this is
translated back again later.
"""
regex = re.compile(r"(^|\})(([^\{\:])+\:)+([^\{]*\{)")
match = regex.search(css)
while match:
css = ''.join([
css[:match.start()],
match.group().replace(":", "___PSEUDOCLASSCOLON___"),
css[match.end():]])
match = regex.search(css)
return css
css = pseudoclasscolon(css)
# Remove spaces from before things.
css = re.sub(r"\s+([!{};:>+\(\)\],])", r"\1", css)
# If there is a `@charset`, then only allow one, and move to the beginning.
css = re.sub(r"^(.*)(@charset \"[^\"]*\";)", r"\2\1", css)
css = re.sub(r"^(\s*@charset [^;]+;\s*)+", r"\1", css)
# Put the space back in for a few cases, such as `@media screen` and
# `(-webkit-min-device-pixel-ratio:0)`.
css = re.sub(r"\band\(", "and (", css)
# Put the colons back.
css = css.replace('___PSEUDOCLASSCOLON___', ':')
# Remove spaces from after things.
css = re.sub(r"([!{}:;>+\(\[,])\s+", r"\1", css)
return css
def remove_unnecessary_semicolons(css):
"""Remove unnecessary semicolons."""
return re.sub(r";+\}", "}", css)
def remove_empty_rules(css):
"""Remove empty rules."""
return re.sub(r"[^\}\{]+\{\}", "", css)
def normalize_rgb_colors_to_hex(css):
"""Convert `rgb(51,102,153)` to `#336699`."""
regex = re.compile(r"rgb\s*\(\s*([0-9,\s]+)\s*\)")
match = regex.search(css)
while match:
colors = map(lambda s: s.strip(), match.group(1).split(","))
hexcolor = '#%.2x%.2x%.2x' % tuple(map(int, colors))
css = css.replace(match.group(), hexcolor)
match = regex.search(css)
return css
def condense_zero_units(css):
"""Replace `0(px, em, %, etc)` with `0`."""
return re.sub(r"([\s:])(0)(px|em|%|in|cm|mm|pc|pt|ex)", r"\1\2", css)
def condense_multidimensional_zeros(css):
"""Replace `:0 0 0 0;`, `:0 0 0;` etc. with `:0;`."""
css = css.replace(":0 0 0 0;", ":0;")
css = css.replace(":0 0 0;", ":0;")
css = css.replace(":0 0;", ":0;")
# Revert `background-position:0;` to the valid `background-position:0 0;`.
css = css.replace("background-position:0;", "background-position:0 0;")
return css
def condense_floating_points(css):
"""Replace `0.6` with `.6` where possible."""
return re.sub(r"(:|\s)0+\.(\d+)", r"\1.\2", css)
def condense_hex_colors(css):
"""Shorten colors from #AABBCC to #ABC where possible."""
regex = re.compile(r"([^\"'=\s])(\s*)#([0-9a-fA-F])([0-9a-fA-F])([0-9a-fA-F])([0-9a-fA-F])([0-9a-fA-F])([0-9a-fA-F])")
match = regex.search(css)
while match:
first = match.group(3) + match.group(5) + match.group(7)
second = match.group(4) + match.group(6) + match.group(8)
if first.lower() == second.lower():
css = css.replace(
match.group(), match.group(1) + match.group(2) + '#' + first)
match = regex.search(css, match.end() - 3)
else:
match = regex.search(css, match.end())
return css
def condense_whitespace(css):
"""Condense multiple adjacent whitespace characters into one."""
return re.sub(r"\s+", " ", css)
def condense_semicolons(css):
"""Condense multiple adjacent semicolon characters into one."""
return re.sub(r";;+", ";", css)
def wrap_css_lines(css, line_length):
"""Wrap the lines of the given CSS to an approximate length."""
lines = []
line_start = 0
for i, char in enumerate(css):
# It's safe to break after `}` characters.
if char == '}' and (i - line_start >= line_length):
lines.append(css[line_start:i + 1])
line_start = i + 1
if line_start < len(css):
lines.append(css[line_start:])
return '\n'.join(lines)
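# For example, wrap_css_lines("a{x}b{y}", 3) returns 'a{x}\nb{y}':
# a newline is inserted after a '}' once a line reaches the requested length.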
def cssmin(css, wrap=None):
css = remove_comments(css)
css = condense_whitespace(css)
# A pseudo class for the Box Model Hack
# (see http://tantek.com/CSS/Examples/boxmodelhack.html)
css = css.replace('"\\"}\\""', "___PSEUDOCLASSBMH___")
css = remove_unnecessary_whitespace(css)
css = remove_unnecessary_semicolons(css)
css = condense_zero_units(css)
css = condense_multidimensional_zeros(css)
css = condense_floating_points(css)
css = normalize_rgb_colors_to_hex(css)
css = condense_hex_colors(css)
if wrap is not None:
css = wrap_css_lines(css, wrap)
css = css.replace("___PSEUDOCLASSBMH___", '"\\"}\\""')
css = condense_semicolons(css)
return css.strip()
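# Example round trip (illustrative):
#   cssmin("p { color: rgb(51,102,153); }") -> 'p{color:#369}'
# whitespace is stripped, the rgb() triple is hexified, and the
# six-digit hex color is condensed to three digits.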
def main():
import optparse
import sys
p = optparse.OptionParser(
prog="cssmin", version=__version__,
usage="%prog [--wrap N]",
description="""Reads raw CSS from stdin, and writes compressed CSS to stdout.""")
p.add_option(
'-w', '--wrap', type='int', default=None, metavar='N',
help="Wrap output to approximately N chars per line.")
options, args = p.parse_args()
sys.stdout.write(cssmin(sys.stdin.read(), wrap=options.wrap))
if __name__ == '__main__':
main()
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
High-level CSS and JS minification class for web2py.
Called by response.include_files()
Created by: Ross Peoples <ross.peoples@gmail.com>
Modified by: Massimo Di Pierro <massimo.dipierro@gmail.com>
"""
import cssmin
import jsmin
import os
import hashlib
import re
def read_binary_file(filename):
f = open(filename, 'rb')
data = f.read()
f.close()
return data
def write_binary_file(filename, data):
f = open(filename, 'wb')
f.write(data)
f.close()
def fix_links(css, static_path):
return re.sub(r'url\((["\'])\.\./', 'url(\\1' + static_path, css)
def minify(files, path_info, folder, optimize_css, optimize_js,
ignore_concat=[],
ignore_minify=['/jquery.js', '/anytime.js']):
"""
Input:
files: is a list of URLs to JS and CSS files (not repeated)
path_info: is the URL of a temp static folder
folder: is the application folder
optimize_css: is a string of the form 'concat|minify|inline'
optimize_js: is a string of the form 'concat|minify|inline'
(minify requires concat, inline requires concat also)
Returns a new list of:
- filename (absolute or relative, css or js, actual or temporary) or
- ('css:inline','...css..')
- ('js:inline','...js..')
"""
optimize_css = optimize_css or ''
optimize_js = optimize_js or ''
concat_css = 'concat' in optimize_css
minify_css = 'minify' in optimize_css
inline_css = 'inline' in optimize_css
concat_js = 'concat' in optimize_js
minify_js = 'minify' in optimize_js
inline_js = 'inline' in optimize_js
static_path, temp = path_info.rsplit('/', 1)
new_files = []
css = []
js = []
processed = []
for k, filename in enumerate(files):
if not filename.startswith('/') or \
any(filename.endswith(x)
for x in ignore_concat):
new_files.append(filename)
continue
abs_filename = os.path.join(
folder, 'static', filename[len(static_path) + 1:])
if filename.lower().endswith('.css'):
processed.append(filename)
spath_info, sfilename = \
path_info.split('/'), filename.split('/')
u = 0
for i, a in enumerate(sfilename):
try:
if a != spath_info[i]:
u = i
break
except:
pass
if concat_css:
contents = read_binary_file(abs_filename)
replacement = '/'.join(spath_info[:u]) + '/'
contents = fix_links(contents, replacement)
if minify_css:
css.append(cssmin.cssmin(contents))
else:
css.append(contents)
else:
css.append(filename)
elif filename.lower().endswith('.js'):
processed.append(filename)
if concat_js:
contents = read_binary_file(abs_filename)
if minify_js and \
not filename.endswith('.min.js') and \
not any(filename.endswith(x)
for x in ignore_minify):
js.append(jsmin.jsmin(contents))
else:
js.append(contents)
else:
js.append(filename)
dest_key = hashlib.md5(repr(processed)).hexdigest()
if css and concat_css:
css = '\n\n'.join(contents for contents in css)
if not inline_css:
temppath = os.path.join(folder, 'static', temp)
if not os.path.exists(temppath):
os.mkdir(temppath)
dest = "compressed_%s.css" % dest_key
tempfile = os.path.join(temppath, dest)
write_binary_file(tempfile, css)
css = path_info + '/%s' % dest
new_files.append(css)
else:
new_files.append(('css:inline', css))
else:
new_files += css
if js and concat_js:
js = '\n'.join(contents for contents in js)
if inline_js:
js = ('js:inline', js)
else:
temppath = os.path.join(folder, 'static', temp)
if not os.path.exists(temppath):
os.mkdir(temppath)
dest = "compressed_%s.js" % dest_key
tempfile = os.path.join(folder, 'static', temp, dest)
write_binary_file(tempfile, js)
js = path_info + '/%s' % dest
new_files.append(js)
else:
new_files += js
return new_files
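# Example call (hypothetical paths; path_info is the URL of the temp
# static folder, folder is the application folder on disk):
#   minify(['/myapp/static/css/base.css', '/myapp/static/js/app.js'],
#          '/myapp/static/temp', 'applications/myapp',
#          'concat|minify', 'concat|minify')
# would return ['/myapp/static/temp/compressed_<md5>.css',
#               '/myapp/static/temp/compressed_<md5>.js'].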
# coding: utf-8
import re
def minify(response):
def _replace(match):
match = match.group()
# save whole <pre>, <textarea> tags, and opening <!-- (so it doesn't break <script>)
# otherwise, replace all whitespace with a single space character
return match if match.startswith(('<pre', '<textarea', '<!--')) else ' '
cpat = re.compile(
r'\s+|<pre(.*?)</pre>|<textarea(.*?)</textarea>|<!--\s', re.DOTALL)
return cpat.sub(_replace, response)
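# For example (illustrative):
#   minify('<div>\n    <b>x</b>\n</div><pre>  keep  </pre>')
# returns '<div> <b>x</b> </div><pre>  keep  </pre>' -- runs of
# whitespace collapse to one space, but <pre> content is untouched.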
#!/usr/bin/env python
# -*- coding: ascii -*-
#
# Copyright 2011
# Andr\xe9 Malo or his licensors, as applicable
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""
=====================
Javascript Minifier
=====================
Javascript Minifier based on `jsmin.c by Douglas Crockford`_\.
This module is a re-implementation based on the semantics of jsmin.c. Usually
it produces the same results. It differs in the following ways:
- there is no error detection: unterminated string, regex and comment
literals are treated as regular javascript code and minified as such.
- Control characters inside string and regex literals are left untouched; they
are not converted to spaces (nor to \n)
- Newline characters are not allowed inside string and regex literals, except
for line continuations in string literals (ECMA-5).
- "return /regex/" is recognized correctly.
- rjsmin does not handle streams, but only complete strings. (However, the
module provides a "streamy" interface).
Besides the list above it differs from direct python ports of jsmin.c in
speed. Since most parts of the logic are handled by the regex engine it's way
faster than the original python port by Baruch Even. The speed factor varies
between about 6 and 55 depending on input and python version (it gets faster
the more compressed the input already is). Compared to the speed-refactored
python port by Dave St.Germain the performance gain is less dramatic but still
between 1.2 and 7. See the docs/BENCHMARKS file for details.
rjsmin.c is a reimplementation of rjsmin.py in C and speeds it up even more.
Both python 2 and python 3 are supported.
.. _jsmin.c by Douglas Crockford:
http://www.crockford.com/javascript/jsmin.c
Original author of Python version: Andr\xe9 Malo
Home page: http://opensource.perlig.de/rjsmin/
Modified by Ross Peoples <ross.peoples@gmail.com> for inclusion into web2py.
"""
__author__ = "Andr\xe9 Malo"
__author__ = getattr(__author__, 'decode', lambda x: __author__)('latin-1')
__docformat__ = "restructuredtext en"
__license__ = "Apache License, Version 2.0"
__version__ = '1.0.2'
__all__ = ['jsmin', 'jsmin_for_posers']
import re as _re
def _make_jsmin(extended=True, python_only=True):
"""
Generate JS minifier based on `jsmin.c by Douglas Crockford`_
.. _jsmin.c by Douglas Crockford:
http://www.crockford.com/javascript/jsmin.c
:Parameters:
`extended` : ``bool``
Extended Regexps? (using lookahead and lookbehind). This is faster,
because it can be optimized way more. The regexps used with `extended`
being false are only left here to allow easier porting to platforms
without extended regex features (and for my own reference...)
`python_only` : ``bool``
Use only the python variant. If true, no attempt is made to load
the C extension.
:Return: Minifier
:Rtype: ``callable``
"""
# pylint: disable = R0912, R0914, W0612
if not python_only:
try:
import _rjsmin
except ImportError:
pass
else:
return _rjsmin.jsmin
try:
xrange
except NameError:
xrange = range # pylint: disable = W0622
space_chars = r'[\000-\011\013\014\016-\040]'
line_comment = r'(?://[^\r\n]*)'
space_comment = r'(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/)'
string1 = \
r'(?:\047[^\047\\\r\n]*(?:\\(?:[^\r\n]|\r?\n|\r)[^\047\\\r\n]*)*\047)'
string2 = r'(?:"[^"\\\r\n]*(?:\\(?:[^\r\n]|\r?\n|\r)[^"\\\r\n]*)*")'
strings = r'(?:%s|%s)' % (string1, string2)
charclass = r'(?:\[[^\\\]\r\n]*(?:\\[^\r\n][^\\\]\r\n]*)*\])'
nospecial = r'[^/\\\[\r\n]'
if extended:
regex = r'(?:/(?![\r\n/*])%s*(?:(?:\\[^\r\n]|%s)%s*)*/)' % (
nospecial, charclass, nospecial
)
else:
regex = (
r'(?:/(?:[^*/\\\r\n\[]|%s|\\[^\r\n])%s*(?:(?:\\[^\r\n]|%s)%s*)*/)'
)
regex = regex % (charclass, nospecial, charclass, nospecial)
space = r'(?:%s|%s)' % (space_chars, space_comment)
newline = r'(?:%s?[\r\n])' % line_comment
def fix_charclass(result):
""" Fixup string of chars to fit into a regex char class """
pos = result.find('-')
if pos >= 0:
result = r'%s%s-' % (result[:pos], result[pos + 1:])
def sequentize(string):
"""
Notate consecutive characters as sequence
(1-4 instead of 1234)
"""
first, last, result = None, None, []
for char in map(ord, string):
if last is None:
first = last = char
elif last + 1 == char:
last = char
else:
result.append((first, last))
first = last = char
if last is not None:
result.append((first, last))
return ''.join(['%s%s%s' % (
chr(first),
last > first + 1 and '-' or '',
last != first and chr(last) or ''
) for first, last in result])
return _re.sub(r'([\000-\040\047])', # for better portability
lambda m: '\\%03o' % ord(m.group(1)), (sequentize(result)
.replace('\\', '\\\\')
.replace('[', '\\[')
.replace(']', '\\]')
)
)
def id_literal_(what):
""" Make id_literal like char class """
match = _re.compile(what).match
result = ''.join([
chr(c) for c in xrange(127) if not match(chr(c))
])
return '[^%s]' % fix_charclass(result)
def not_id_literal_(keep):
""" Make negated id_literal like char class """
match = _re.compile(id_literal_(keep)).match
result = ''.join([
chr(c) for c in xrange(127) if not match(chr(c))
])
return r'[%s]' % fix_charclass(result)
not_id_literal = not_id_literal_(r'[a-zA-Z0-9_$]')
preregex1 = r'[(,=:\[!&|?{};\r\n]'
preregex2 = r'%(not_id_literal)sreturn' % locals()
if extended:
id_literal = id_literal_(r'[a-zA-Z0-9_$]')
id_literal_open = id_literal_(r'[a-zA-Z0-9_${\[(+-]')
id_literal_close = id_literal_(r'[a-zA-Z0-9_$}\])"\047+-]')
space_sub = _re.compile((
r'([^\047"/\000-\040]+)'
r'|(%(strings)s[^\047"/\000-\040]*)'
r'|(?:(?<=%(preregex1)s)%(space)s*(%(regex)s[^\047"/\000-\040]*))'
r'|(?:(?<=%(preregex2)s)%(space)s*(%(regex)s[^\047"/\000-\040]*))'
r'|(?<=%(id_literal_close)s)'
r'%(space)s*(?:(%(newline)s)%(space)s*)+'
r'(?=%(id_literal_open)s)'
r'|(?<=%(id_literal)s)(%(space)s)+(?=%(id_literal)s)'
r'|%(space)s+'
r'|(?:%(newline)s%(space)s*)+'
) % locals()).sub
def space_subber(match):
""" Substitution callback """
# pylint: disable = C0321, R0911
groups = match.groups()
if groups[0]:
return groups[0]
elif groups[1]:
return groups[1]
elif groups[2]:
return groups[2]
elif groups[3]:
return groups[3]
elif groups[4]:
return '\n'
elif groups[5]:
return ' '
else:
return ''
def jsmin(script): # pylint: disable = W0621
r"""
Minify javascript based on `jsmin.c by Douglas Crockford`_\.
Instead of parsing the stream char by char, it uses a regular
expression approach which minifies the whole script with one big
substitution regex.
.. _jsmin.c by Douglas Crockford:
http://www.crockford.com/javascript/jsmin.c
:Parameters:
`script` : ``str``
Script to minify
:Return: Minified script
:Rtype: ``str``
"""
return space_sub(space_subber, '\n%s\n' % script).strip()
else:
pre_regex = r'(?:%(preregex1)s|%(preregex2)s)' % locals()
not_id_literal_open = not_id_literal_(r'[a-zA-Z0-9_${\[(+-]')
not_id_literal_close = not_id_literal_(r'[a-zA-Z0-9_$}\])"\047+-]')
space_norm_sub = _re.compile((
r'(%(strings)s)'
r'|(?:(%(pre_regex)s)%(space)s*(%(regex)s))'
r'|(%(space)s)+'
r'|(?:(%(newline)s)%(space)s*)+'
) % locals()).sub
def space_norm_subber(match):
""" Substitution callback """
# pylint: disable = C0321
groups = match.groups()
if groups[0]:
return groups[0]
elif groups[1]:
return groups[1].replace('\r', '\n') + groups[2]
elif groups[3]:
return ' '
elif groups[4]:
return '\n'
space_sub1 = _re.compile((
r'[\040\n]?(%(strings)s|%(pre_regex)s%(regex)s)'
r'|\040(%(not_id_literal)s)'
r'|\n(%(not_id_literal_open)s)'
) % locals()).sub
def space_subber1(match):
""" Substitution callback """
groups = match.groups()
return groups[0] or groups[1] or groups[2]
space_sub2 = _re.compile((
r'(%(strings)s)\040?'
r'|(%(pre_regex)s%(regex)s)[\040\n]?'
r'|(%(not_id_literal)s)\040'
r'|(%(not_id_literal_close)s)\n'
) % locals()).sub
def space_subber2(match):
""" Substitution callback """
groups = match.groups()
return groups[0] or groups[1] or groups[2] or groups[3]
def jsmin(script):
r"""
Minify javascript based on `jsmin.c by Douglas Crockford`_\.
Instead of parsing the stream char by char, it uses a regular
expression approach. The script is minified with three passes:
normalization
Control characters are mapped to spaces, spaces and newlines
are squeezed and comments are stripped.
space removal 1
Spaces before certain tokens are removed
space removal 2
Spaces after certain tokens are removed
.. _jsmin.c by Douglas Crockford:
http://www.crockford.com/javascript/jsmin.c
:Parameters:
`script` : ``str``
Script to minify
:Return: Minified script
:Rtype: ``str``
"""
return space_sub2(space_subber2,
space_sub1(space_subber1,
space_norm_sub(space_norm_subber,
'\n%s\n' % script)
)
).strip()
return jsmin
jsmin = _make_jsmin()
#####################
# EXAMPLE USAGE #
#####################
#
# import jsmin
# jsmin.jsmin(script)
#
def jsmin_for_posers(script):
r"""
Minify javascript based on `jsmin.c by Douglas Crockford`_\.
Instead of parsing the stream char by char, it uses a regular
expression approach which minifies the whole script with one big
substitution regex.
.. _jsmin.c by Douglas Crockford:
http://www.crockford.com/javascript/jsmin.c
:Warning: This function is the digest of a _make_jsmin() call. It just
utilizes the resulting regex. It's just for fun here and may
vanish any time. Use the `jsmin` function instead.
:Parameters:
`script` : ``str``
Script to minify
:Return: Minified script
:Rtype: ``str``
"""
def subber(match):
""" Substitution callback """
groups = match.groups()
return (
groups[0] or
groups[1] or
groups[2] or
groups[3] or
(groups[4] and '\n') or
(groups[5] and ' ') or
''
)
return _re.sub(
r'([^\047"/\000-\040]+)|((?:(?:\047[^\047\\\r\n]*(?:\\(?:[^\r\n]|\r?'
r'\n|\r)[^\047\\\r\n]*)*\047)|(?:"[^"\\\r\n]*(?:\\(?:[^\r\n]|\r?\n|'
r'\r)[^"\\\r\n]*)*"))[^\047"/\000-\040]*)|(?:(?<=[(,=:\[!&|?{};\r\n]'
r')(?:[\000-\011\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/'
r'))*((?:/(?![\r\n/*])[^/\\\[\r\n]*(?:(?:\\[^\r\n]|(?:\[[^\\\]\r\n]*'
r'(?:\\[^\r\n][^\\\]\r\n]*)*\]))[^/\\\[\r\n]*)*/)[^\047"/\000-\040]*'
r'))|(?:(?<=[\000-#%-,./:-@\[-^`{-~-]return)(?:[\000-\011\013\014\01'
r'6-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/))*((?:/(?![\r\n/*])[^/'
r'\\\[\r\n]*(?:(?:\\[^\r\n]|(?:\[[^\\\]\r\n]*(?:\\[^\r\n][^\\\]\r\n]'
r'*)*\]))[^/\\\[\r\n]*)*/)[^\047"/\000-\040]*))|(?<=[^\000-!#%&(*,./'
r':-@\[\\^`{|~])(?:[\000-\011\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^/'
r'*][^*]*\*+)*/))*(?:((?:(?://[^\r\n]*)?[\r\n]))(?:[\000-\011\013\01'
r'4\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/))*)+(?=[^\000-#%-\04'
r'7)*,./:-@\\-^`|-~])|(?<=[^\000-#%-,./:-@\[-^`{-~-])((?:[\000-\011'
r'\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/)))+(?=[^\000-'
r'#%-,./:-@\[-^`{-~-])|(?:[\000-\011\013\014\016-\040]|(?:/\*[^*]*\*'
r'+(?:[^/*][^*]*\*+)*/))+|(?:(?:(?://[^\r\n]*)?[\r\n])(?:[\000-\011'
r'\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/))*)+',
subber, '\n%s\n' % script
).strip()
if __name__ == '__main__':
import sys as _sys
_sys.stdout.write(jsmin(_sys.stdin.read()))
"""Simple AES cipher implementation in pure Python following PEP-272 API
Homepage: https://bitbucket.org/intgr/pyaes/
The goal of this module is to be as fast as reasonable in Python while still
being Pythonic and readable/understandable. It is licensed under the permissive
MIT license.
Hopefully the code is readable and commented enough that it can serve as an
introduction to the AES cipher for Python coders. In fact, it should go along
well with the Stick Figure Guide to AES:
http://www.moserware.com/2009/09/stick-figure-guide-to-advanced.html
Contrary to intuition, this implementation numbers the 4x4 matrices from top to
bottom for efficiency reasons::
0 4 8 12
1 5 9 13
2 6 10 14
3 7 11 15
Effectively it's the transposition of what you'd expect. This actually makes
the code simpler -- except the ShiftRows step, but hopefully the explanation
there clears it up.
"""
####
# Copyright (c) 2010 Marti Raudsepp <marti@juffo.org>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
####
from array import array
# Globals mandated by PEP 272:
# http://www.python.org/dev/peps/pep-0272/
MODE_ECB = 1
MODE_CBC = 2
#MODE_CTR = 6
block_size = 16
key_size = None
def new(key, mode=MODE_CBC, IV=None):
if mode == MODE_ECB:
return ECBMode(AES(key))
elif mode == MODE_CBC:
if IV is None:
raise ValueError("CBC mode needs an IV value!")
return CBCMode(AES(key), IV)
else:
raise NotImplementedError
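# Example PEP 272 usage (illustrative; 16-byte key and IV, Python 2
# byte strings; data length must be a multiple of the 16-byte block):
#   c = new('0123456789abcdef', MODE_CBC, IV='\x00' * 16)
#   ct = c.encrypt('sixteen byte msg')
#   # decrypt with a fresh cipher object, since CBC mode mutates its IV
#   assert new('0123456789abcdef', MODE_CBC,
#              IV='\x00' * 16).decrypt(ct) == 'sixteen byte msg'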
#### AES cipher implementation
class AES(object):
block_size = 16
def __init__(self, key):
self.setkey(key)
def setkey(self, key):
"""Sets the key and performs key expansion."""
self.key = key
self.key_size = len(key)
if self.key_size == 16:
self.rounds = 10
elif self.key_size == 24:
self.rounds = 12
elif self.key_size == 32:
self.rounds = 14
else:
raise ValueError("Key length must be 16, 24 or 32 bytes")
self.expand_key()
def expand_key(self):
"""Performs AES key expansion on self.key and stores in self.exkey"""
# The key schedule specifies how parts of the key are fed into the
# cipher's round functions. "Key expansion" means performing this
# schedule in advance. Almost all implementations do this.
#
# Here's a description of AES key schedule:
# http://en.wikipedia.org/wiki/Rijndael_key_schedule
# The expanded key starts with the actual key itself
exkey = array('B', self.key)
# extra key expansion steps
if self.key_size == 16:
extra_cnt = 0
elif self.key_size == 24:
extra_cnt = 2
else:
extra_cnt = 3
# 4-byte temporary variable for key expansion
word = exkey[-4:]
# Each expansion cycle uses 'i' once for Rcon table lookup
for i in xrange(1, 11):
#### key schedule core:
# left-rotate by 1 byte
word = word[1:4] + word[0:1]
# apply S-box to all bytes
for j in xrange(4):
word[j] = aes_sbox[word[j]]
# apply the Rcon table to the leftmost byte
word[0] = word[0] ^ aes_Rcon[i]
#### end key schedule core
for z in xrange(4):
for j in xrange(4):
# mix in bytes from the last subkey
word[j] ^= exkey[-self.key_size + j]
exkey.extend(word)
# Last key expansion cycle always finishes here
if len(exkey) >= (self.rounds+1) * self.block_size:
break
# Special substitution step for 256-bit key
if self.key_size == 32:
for j in xrange(4):
# mix in bytes from the last subkey XORed with S-box of
# current word bytes
word[j] = aes_sbox[word[j]] ^ exkey[-self.key_size + j]
exkey.extend(word)
# Twice for 192-bit key, thrice for 256-bit key
for z in xrange(extra_cnt):
for j in xrange(4):
# mix in bytes from the last subkey
word[j] ^= exkey[-self.key_size + j]
exkey.extend(word)
self.exkey = exkey
def add_round_key(self, block, round):
"""AddRoundKey step in AES. This is where the key is mixed into plaintext"""
offset = round * 16
exkey = self.exkey
for i in xrange(16):
block[i] ^= exkey[offset + i]
#print 'AddRoundKey:', block
def sub_bytes(self, block, sbox):
"""SubBytes step, apply S-box to all bytes
Depending on whether encrypting or decrypting, a different sbox array
is passed in.
"""
for i in xrange(16):
block[i] = sbox[block[i]]
#print 'SubBytes :', block
def shift_rows(self, b):
"""ShiftRows step. Shifts 2nd row to left by 1, 3rd row by 2, 4th row by 3
Since we're performing this on a transposed matrix, cells are numbered
from top to bottom::
0 4 8 12 -> 0 4 8 12 -- 1st row doesn't change
1 5 9 13 -> 5 9 13 1 -- row shifted to left by 1 (wraps around)
2 6 10 14 -> 10 14 2 6 -- shifted by 2
3 7 11 15 -> 15 3 7 11 -- shifted by 3
"""
b[1], b[5], b[ 9], b[13] = b[ 5], b[ 9], b[13], b[ 1]
b[2], b[6], b[10], b[14] = b[10], b[14], b[ 2], b[ 6]
b[3], b[7], b[11], b[15] = b[15], b[ 3], b[ 7], b[11]
#print 'ShiftRows :', b
def shift_rows_inv(self, b):
"""Similar to shift_rows above, but performed in inverse for decryption."""
b[ 5], b[ 9], b[13], b[ 1] = b[1], b[5], b[ 9], b[13]
b[10], b[14], b[ 2], b[ 6] = b[2], b[6], b[10], b[14]
b[15], b[ 3], b[ 7], b[11] = b[3], b[7], b[11], b[15]
#print 'ShiftRows :', b
def mix_columns(self, block):
"""MixColumns step. Mixes the values in each column"""
# Cache global multiplication tables (see below)
mul_by_2 = gf_mul_by_2
mul_by_3 = gf_mul_by_3
# Since we're dealing with a transposed matrix, columns are already
# sequential
for i in xrange(4):
col = i * 4
#v0, v1, v2, v3 = block[col : col+4]
v0, v1, v2, v3 = (block[col], block[col + 1], block[col + 2],
block[col + 3])
block[col ] = mul_by_2[v0] ^ v3 ^ v2 ^ mul_by_3[v1]
block[col+1] = mul_by_2[v1] ^ v0 ^ v3 ^ mul_by_3[v2]
block[col+2] = mul_by_2[v2] ^ v1 ^ v0 ^ mul_by_3[v3]
block[col+3] = mul_by_2[v3] ^ v2 ^ v1 ^ mul_by_3[v0]
#print 'MixColumns :', block
def mix_columns_inv(self, block):
"""Similar to mix_columns above, but performed in inverse for decryption."""
# Cache global multiplication tables (see below)
mul_9 = gf_mul_by_9
mul_11 = gf_mul_by_11
mul_13 = gf_mul_by_13
mul_14 = gf_mul_by_14
# Since we're dealing with a transposed matrix, columns are already
# sequential
for i in xrange(4):
col = i * 4
v0, v1, v2, v3 = (block[col], block[col + 1], block[col + 2],
block[col + 3])
#v0, v1, v2, v3 = block[col:col+4]
block[col ] = mul_14[v0] ^ mul_9[v3] ^ mul_13[v2] ^ mul_11[v1]
block[col+1] = mul_14[v1] ^ mul_9[v0] ^ mul_13[v3] ^ mul_11[v2]
block[col+2] = mul_14[v2] ^ mul_9[v1] ^ mul_13[v0] ^ mul_11[v3]
block[col+3] = mul_14[v3] ^ mul_9[v2] ^ mul_13[v1] ^ mul_11[v0]
#print 'MixColumns :', block
def encrypt_block(self, block):
"""Encrypts a single block. This is the main AES function"""
# For efficiency reasons, the state between steps is transmitted via a
# mutable array, not returned.
self.add_round_key(block, 0)
for round in xrange(1, self.rounds):
self.sub_bytes(block, aes_sbox)
self.shift_rows(block)
self.mix_columns(block)
self.add_round_key(block, round)
self.sub_bytes(block, aes_sbox)
self.shift_rows(block)
# no mix_columns step in the last round
self.add_round_key(block, self.rounds)
def decrypt_block(self, block):
"""Decrypts a single block. This is the main AES decryption function"""
# For efficiency reasons, the state between steps is transmitted via a
# mutable array, not returned.
self.add_round_key(block, self.rounds)
# count rounds down from rounds-1 ... 1
for round in xrange(self.rounds-1, 0, -1):
self.shift_rows_inv(block)
self.sub_bytes(block, aes_inv_sbox)
self.add_round_key(block, round)
self.mix_columns_inv(block)
self.shift_rows_inv(block)
self.sub_bytes(block, aes_inv_sbox)
self.add_round_key(block, 0)
# no mix_columns step in the last round
#### ECB mode implementation
class ECBMode(object):
"""Electronic CodeBook (ECB) mode encryption.
Basically this mode applies the cipher function to each block individually;
no feedback is done. NB! This is insecure for almost all purposes
"""
def __init__(self, cipher):
self.cipher = cipher
self.block_size = cipher.block_size
def ecb(self, data, block_func):
"""Perform ECB mode with the given function"""
if len(data) % self.block_size != 0:
raise ValueError("Plaintext length must be multiple of 16")
block_size = self.block_size
data = array('B', data)
for offset in xrange(0, len(data), block_size):
block = data[offset : offset+block_size]
block_func(block)
data[offset : offset+block_size] = block
return data.tostring()
def encrypt(self, data):
"""Encrypt data in ECB mode"""
return self.ecb(data, self.cipher.encrypt_block)
def decrypt(self, data):
"""Decrypt data in ECB mode"""
return self.ecb(data, self.cipher.decrypt_block)
#### CBC mode
class CBCMode(object):
"""Cipher Block Chaining (CBC) mode encryption. This mode avoids content leaks.
In CBC encryption, each plaintext block is XORed with the ciphertext block
preceding it; decryption is simply the inverse.
"""
# A better explanation of CBC can be found here:
# http://en.wikipedia.org/wiki/Block_cipher_modes_of_operation#Cipher-block_chaining_.28CBC.29
def __init__(self, cipher, IV):
self.cipher = cipher
self.block_size = cipher.block_size
self.IV = array('B', IV)
def encrypt(self, data):
"""Encrypt data in CBC mode"""
block_size = self.block_size
if len(data) % block_size != 0:
raise ValueError("Plaintext length must be multiple of 16")
data = array('B', data)
IV = self.IV
for offset in xrange(0, len(data), block_size):
block = data[offset : offset+block_size]
# Perform CBC chaining
for i in xrange(block_size):
block[i] ^= IV[i]
self.cipher.encrypt_block(block)
data[offset : offset+block_size] = block
IV = block
self.IV = IV
return data.tostring()
def decrypt(self, data):
"""Decrypt data in CBC mode"""
block_size = self.block_size
if len(data) % block_size != 0:
raise ValueError("Ciphertext length must be multiple of 16")
data = array('B', data)
IV = self.IV
for offset in xrange(0, len(data), block_size):
ctext = data[offset : offset+block_size]
block = ctext[:]
self.cipher.decrypt_block(block)
# Perform CBC chaining
#for i in xrange(block_size):
# data[offset + i] ^= IV[i]
for i in xrange(block_size):
block[i] ^= IV[i]
data[offset : offset+block_size] = block
IV = ctext
#data[offset : offset+block_size] = block
self.IV = IV
return data.tostring()
####
def galois_multiply(a, b):
"""Galois Field multiplicaiton for AES"""
p = 0
while b:
if b & 1:
p ^= a
a <<= 1
if a & 0x100:
a ^= 0x1b
b >>= 1
return p & 0xff
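# Sanity check against the worked example in FIPS-197 section 4.2.1:
# galois_multiply(0x57, 0x02) == 0xae and galois_multiply(0x57, 0x13) == 0xfe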
# Precompute the multiplication tables for encryption
gf_mul_by_2 = array('B', [galois_multiply(x, 2) for x in range(256)])
gf_mul_by_3 = array('B', [galois_multiply(x, 3) for x in range(256)])
# ... for decryption
gf_mul_by_9 = array('B', [galois_multiply(x, 9) for x in range(256)])
gf_mul_by_11 = array('B', [galois_multiply(x, 11) for x in range(256)])
gf_mul_by_13 = array('B', [galois_multiply(x, 13) for x in range(256)])
gf_mul_by_14 = array('B', [galois_multiply(x, 14) for x in range(256)])
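# Sanity sketch (added): these values match the worked xtime example in
# FIPS-197, where {57} x {02} = {ae} and {57} x {03} = {f9} in AES's GF(2^8).
assert galois_multiply(0x57, 0x02) == 0xae
assert gf_mul_by_2[0x57] == 0xae
assert gf_mul_by_3[0x57] == 0xf9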
####
# The S-box is a 256-element array that maps a single byte value to another
# byte value. Since it's designed to be reversible, each value occurs only
# once in the S-box.
#
# More information: http://en.wikipedia.org/wiki/Rijndael_S-box
aes_sbox = array('B',
'637c777bf26b6fc53001672bfed7ab76'
'ca82c97dfa5947f0add4a2af9ca472c0'
'b7fd9326363ff7cc34a5e5f171d83115'
'04c723c31896059a071280e2eb27b275'
'09832c1a1b6e5aa0523bd6b329e32f84'
'53d100ed20fcb15b6acbbe394a4c58cf'
'd0efaafb434d338545f9027f503c9fa8'
'51a3408f929d38f5bcb6da2110fff3d2'
'cd0c13ec5f974417c4a77e3d645d1973'
'60814fdc222a908846eeb814de5e0bdb'
'e0323a0a4906245cc2d3ac629195e479'
'e7c8376d8dd54ea96c56f4ea657aae08'
'ba78252e1ca6b4c6e8dd741f4bbd8b8a'
'703eb5664803f60e613557b986c11d9e'
'e1f8981169d98e949b1e87e9ce5528df'
'8ca1890dbfe6426841992d0fb054bb16'.decode('hex')
)
# This is the inverse of the above. In other words:
# aes_inv_sbox[aes_sbox[val]] == val
aes_inv_sbox = array('B',
'52096ad53036a538bf40a39e81f3d7fb'
'7ce339829b2fff87348e4344c4dee9cb'
'547b9432a6c2233dee4c950b42fac34e'
'082ea16628d924b2765ba2496d8bd125'
'72f8f66486689816d4a45ccc5d65b692'
'6c704850fdedb9da5e154657a78d9d84'
'90d8ab008cbcd30af7e45805b8b34506'
'd02c1e8fca3f0f02c1afbd0301138a6b'
'3a9111414f67dcea97f2cfcef0b4e673'
'96ac7422e7ad3585e2f937e81c75df6e'
'47f11a711d29c5896fb7620eaa18be1b'
'fc563e4bc6d279209adbc0fe78cd5af4'
'1fdda8338807c731b11210592780ec5f'
'60517fa919b54a0d2de57a9f93c99cef'
'a0e03b4dae2af5b0c8ebbb3c83539961'
'172b047eba77d626e169146355210c7d'.decode('hex')
)
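# Sanity sketch (added): the inverse property stated below holds for every
# byte value, i.e. aes_inv_sbox really is the inverse of aes_sbox.
assert all(aes_inv_sbox[aes_sbox[v]] == v for v in range(256))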
# The Rcon table is used in AES's key schedule (key expansion)
# It's a pre-computed table of exponentiation of 2 in AES's finite field
#
# More information: http://en.wikipedia.org/wiki/Rijndael_key_schedule
aes_Rcon = array('B',
'8d01020408102040801b366cd8ab4d9a'
'2f5ebc63c697356ad4b37dfaefc59139'
'72e4d3bd61c29f254a943366cc831d3a'
'74e8cb8d01020408102040801b366cd8'
'ab4d9a2f5ebc63c697356ad4b37dfaef'
'c5913972e4d3bd61c29f254a943366cc'
'831d3a74e8cb8d01020408102040801b'
'366cd8ab4d9a2f5ebc63c697356ad4b3'
'7dfaefc5913972e4d3bd61c29f254a94'
'3366cc831d3a74e8cb8d010204081020'
'40801b366cd8ab4d9a2f5ebc63c69735'
'6ad4b37dfaefc5913972e4d3bd61c29f'
'254a943366cc831d3a74e8cb8d010204'
'08102040801b366cd8ab4d9a2f5ebc63'
'c697356ad4b37dfaefc5913972e4d3bd'
'61c29f254a943366cc831d3a74e8cb'.decode('hex')
)
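# Sanity sketch (added): each Rcon entry is the previous one doubled in
# AES's finite field, i.e. the table really is successive powers of 2.
assert all(aes_Rcon[i] == galois_multiply(aes_Rcon[i - 1], 2)
           for i in xrange(1, len(aes_Rcon)))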
| Python |
SMSCODES = {
'Aliant': '@chat.wirefree.ca',
'Alltel': '@message.alltel.com',
'Ameritech': '@paging.acswireless.com',
'AT&T': '@txt.att.net',
'AU by KDDI': '@ezweb.ne.jp',
'BeeLine GSM': '@sms.beemail.ru',
'Bell Mobility Canada': '@txt.bellmobility.ca',
'Bellsouth': '@bellsouth.cl',
'BellSouth Mobility': '@blsdcs.net',
'Blue Sky Frog': '@blueskyfrog.com',
'Boost': '@myboostmobile.com',
'Cellular South': '@csouth1.com',
'CellularOne': '@mobile.celloneusa.com',
'CellularOne West': '@mycellone.com',
'Cincinnati Bell': '@gocbw.com',
'Claro': '@clarotorpedo.com.br',
'Comviq': '@sms.comviq.se',
'Dutchtone/Orange-NL': '@sms.orange.nl',
'Edge Wireless': '@sms.edgewireless.com',
'EinsteinPCS / Airadigm Communications': '@einsteinsms.com',
'EPlus': '@smsmail.eplus.de',
'Fido Canada': '@fido.ca',
'Golden Telecom': '@sms.goldentele.com',
'Idea Cellular': '@ideacellular.net',
'Kyivstar': '@sms.kyivstar.net',
'LMT': '@sms.lmt.lv',
'Manitoba Telecom Systems': '@text.mtsmobility.com',
'Meteor': '@sms.mymeteor.ie',
'Metro PCS': '@mymetropcs.com',
'Metrocall Pager': '@page.metrocall.com',
'MobileOne': '@m1.com.sg',
'Mobilfone': '@page.mobilfone.com',
'Mobility Bermuda': '@ml.bm',
'Netcom': '@sms.netcom.no',
'Nextel': '@messaging.nextel.com',
'NPI Wireless': '@npiwireless.com',
'O2': '@o2.co.uk',
'O2 M-mail': '@mmail.co.uk',
'Optus': '@optusmobile.com.au',
'Orange': '@orange.net',
'Oskar': '@mujoskar.cz',
'Pagenet': '@pagenet.net',
'PCS Rogers': '@pcs.rogers.com',
'Personal Communication': '@pcom.ru',
'Plus GSM Poland': '@text.plusgsm.pl',
'Powertel': '@ptel.net',
'Primtel': '@sms.primtel.ru',
'PSC Wireless': '@sms.pscel.com',
'Qualcomm': '@pager.qualcomm.com',
'Qwest': '@qwestmp.com',
'Safaricom': '@safaricomsms.com',
'Satelindo GSM': '@satelindogsm.com',
'SCS-900': '@scs-900.ru',
'Simple Freedom': '@text.simplefreedom.net',
'Skytel - Alphanumeric': '@skytel.com',
'Smart Telecom': '@mysmart.mymobile.ph',
'Southern Linc': '@page.southernlinc.com',
'Sprint PCS': '@messaging.sprintpcs.com',
'Sprint PCS - Short Mail': '@sprintpcs.com',
'SunCom': '@tms.suncom.com',
'SureWest Communications': '@mobile.surewest.com',
'SwissCom Mobile': '@bluewin.ch',
'T-Mobile Germany': '@T-D1-SMS.de',
'T-Mobile Netherlands': '@gin.nl',
'T-Mobile UK': '@t-mobile.uk.net',
'T-Mobile USA (tmail)': '@tmail.com',
'T-Mobile USA (tmomail)': '@tmomail.net',
'Tele2 Latvia': '@sms.tele2.lv',
'Telefonica Movistar': '@movistar.net',
'Telenor': '@mobilpost.no',
'Telia Denmark': '@gsm1800.telia.dk',
'Telus Mobility': '@msg.telus.com',
'The Phone House': '@sms.phonehouse.de',
'TIM': '@timnet.com',
'UMC': '@sms.umc.com.ua',
'Unicel': '@utext.com',
'US Cellular': '@email.uscc.net',
'Verizon Wireless (vtext)': '@vtext.com',
'Verizon Wireless (airtouchpaging)': '@airtouchpaging.com',
'Verizon Wireless (myairmail)': '@myairmail.com',
'Vessotel': '@pager.irkutsk.ru',
'Virgin Mobile Canada': '@vmobile.ca',
'Virgin Mobile USA': '@vmobl.com',
'Vodafone Italy': '@sms.vodafone.it',
'Vodafone Japan (n)': '@n.vodafone.ne.jp',
'Vodafone Japan (d)': '@d.vodafone.ne.jp',
'Vodafone Japan (r)': '@r.vodafone.ne.jp',
'Vodafone Japan (k)': '@k.vodafone.ne.jp',
'Vodafone Japan (t)': '@t.vodafone.ne.jp',
'Vodafone Japan (q)': '@q.vodafone.ne.jp',
'Vodafone Japan (s)': '@s.vodafone.ne.jp',
'Vodafone Japan (h)': '@h.vodafone.ne.jp',
'Vodafone Japan (c)': '@c.vodafone.ne.jp',
'Vodafone Spain': '@vodafone.es',
'Vodafone UK': '@vodafone.net',
'Weblink Wireless': '@airmessage.net',
'WellCom': '@sms.welcome2well.com',
'WyndTell': '@wyndtell.com',
}
def sms_email(number, provider):
    """
    >>> print sms_email('1 (312) 375-6536','T-Mobile USA (tmail)')
    13123756536@tmail.com
    """
    import re
    if number[:2] == '+1':
        number = number[1:]
    elif number[0] == '+':
        number = number[3:]
    elif number[:2] == '00':
        number = number[3:]
    number = re.sub('[^\d]', '', number)
    return number + SMSCODES[provider]
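# Hypothetical helper (added for illustration; the SMTP host and the sender
# address are assumptions, not part of the original module): once sms_email()
# builds the carrier gateway address, delivering the text message is just
# sending an ordinary email.
def send_sms_via_email(number, provider, body, smtp_host='localhost',
                       sender='noreply@example.com'):
    import smtplib
    from email.mime.text import MIMEText
    msg = MIMEText(body)
    msg['To'] = sms_email(number, provider)
    msg['From'] = sender
    server = smtplib.SMTP(smtp_host)
    try:
        server.sendmail(sender, [msg['To']], msg.as_string())
    finally:
        server.quit()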
| Python |
"""
Developed by 616d41631bff906704951934ffe4015e
Released under web2py license because includes gluon/cache.py source code
"""
import redis
from redis.exceptions import ConnectionError
from gluon import current
from gluon.cache import CacheAbstract
import cPickle as pickle
import time
import re
import logging
import thread
logger = logging.getLogger("web2py.cache.redis")
locker = thread.allocate_lock()
def RedisCache(*args, **vars):
"""
Usage example: put in models
from gluon.contrib.redis_cache import RedisCache
cache.redis = RedisCache('localhost:6379',db=None, debug=True)
    cache.redis.stats()
    returns a dictionary with the statistics of the Redis server,
    with one additional key ('w2p_keys') showing all keys currently set
    by web2py, together with their TTL
    if debug=True, additional tracking is activated and another key
    ('w2p_stats') is added, showing hit_total and misses
"""
locker.acquire()
try:
if not hasattr(RedisCache, 'redis_instance'):
RedisCache.redis_instance = RedisClient(*args, **vars)
finally:
locker.release()
return RedisCache.redis_instance
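# Usage sketch (added for illustration; the server address and the cached
# query are assumptions). In a web2py model one would typically write:
#
#     from gluon.contrib.redis_cache import RedisCache
#     cache.redis = RedisCache('localhost:6379', db=None, debug=True)
#
#     def expensive():
#         return db(db.mytable).count()
#
#     # cache the result under the key 'rows' for 60 seconds
#     rows = cache.redis('rows', expensive, time_expire=60)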
class RedisClient(object):
meta_storage = {}
MAX_RETRIES = 5
RETRIES = 0
def __init__(self, server='localhost:6379', db=None, debug=False):
self.server = server
self.db = db or 0
host, port = (self.server.split(':') + ['6379'])[:2]
port = int(port)
self.request = current.request
self.debug = debug
if self.request:
app = self.request.application
else:
app = ''
if not app in self.meta_storage:
self.storage = self.meta_storage[app] = {
CacheAbstract.cache_stats_name: {
'hit_total': 0,
'misses': 0,
}}
else:
self.storage = self.meta_storage[app]
self.r_server = redis.Redis(host=host, port=port, db=self.db)
def __call__(self, key, f, time_expire=300):
try:
if time_expire is None:
time_expire = 24 * 60 * 60
newKey = self.__keyFormat__(key)
value = None
obj = self.r_server.get(newKey)
ttl = self.r_server.ttl(newKey) or 0
if ttl > time_expire:
obj = None
if obj:
if self.debug:
self.r_server.incr('web2py_cache_statistics:hit_total')
value = pickle.loads(obj)
elif f is None:
self.r_server.delete(newKey)
else:
if self.debug:
self.r_server.incr('web2py_cache_statistics:misses')
value = f()
if time_expire == 0:
time_expire = 1
self.r_server.setex(newKey, pickle.dumps(value), time_expire)
return value
except ConnectionError:
return self.retry_call(key, f, time_expire)
def retry_call(self, key, f, time_expire):
self.RETRIES += 1
if self.RETRIES <= self.MAX_RETRIES:
logger.error("sleeping %s seconds before reconnecting" %
(2 * self.RETRIES))
time.sleep(2 * self.RETRIES)
self.__init__(self.server, self.db, self.debug)
return self.__call__(key, f, time_expire)
else:
self.RETRIES = 0
raise ConnectionError('Redis instance is unavailable at %s' % (
self.server))
def increment(self, key, value=1, time_expire=300):
try:
newKey = self.__keyFormat__(key)
obj = self.r_server.get(newKey)
if obj:
return self.r_server.incr(newKey, value)
else:
self.r_server.setex(newKey, value, time_expire)
return value
except ConnectionError:
return self.retry_increment(key, value, time_expire)
def retry_increment(self, key, value, time_expire):
self.RETRIES += 1
if self.RETRIES <= self.MAX_RETRIES:
logger.error("sleeping some seconds before reconnecting")
time.sleep(2 * self.RETRIES)
self.__init__(self.server, self.db, self.debug)
return self.increment(key, value, time_expire)
else:
self.RETRIES = 0
raise ConnectionError('Redis instance is unavailable at %s' % (
self.server))
def clear(self, regex):
"""
Auxiliary function called by `clear` to search and
clear cache entries
"""
r = re.compile(regex)
prefix = "w2p:%s:" % (self.request.application)
pipe = self.r_server.pipeline()
for a in self.r_server.keys("%s*" %
(prefix)):
if r.match(str(a).replace(prefix, '', 1)):
pipe.delete(a)
pipe.execute()
def stats(self):
statscollector = self.r_server.info()
if self.debug:
statscollector['w2p_stats'] = dict(
hit_total=self.r_server.get(
'web2py_cache_statistics:hit_total'),
misses=self.r_server.get('web2py_cache_statistics:misses')
)
statscollector['w2p_keys'] = dict()
for a in self.r_server.keys("w2p:%s:*" % (
self.request.application)):
statscollector['w2p_keys']["%s_expire_in_sec" % (a)] = \
self.r_server.ttl(a)
return statscollector
def __keyFormat__(self, key):
return 'w2p:%s:%s' % (self.request.application,
key.replace(' ', '_'))
| Python |
"""
Developed by Massimo Di Pierro
Released under the web2py license (LGPL)
It is an interface on top of urllib2 which simplifies scripting of HTTP requests
mostly for testing purposes
- customizable
- supports basic auth
- supports cookies
- supports session cookies (tested with web2py sessions)
- detects broken session
- detects web2py form postbacks and handles formname and formkey
- detects web2py tickets
Some examples at the bottom.
"""
import re
import time
import urllib
import urllib2
DEFAULT_HEADERS = {
'user-agent': 'Mozilla/4.0', # some servers are picky
'accept-language': 'en',
}
FORM_REGEX = re.compile('(\<input name\="_formkey" type\="hidden" value\="(?P<formkey>.+?)" \/\>)?\<input name\="_formname" type\="hidden" value\="(?P<formname>.+?)" \/\>')
SESSION_REGEX = 'session_id_(?P<name>.+)'
class WebClient(object):
def __init__(self,
app='',
postbacks=True,
default_headers=DEFAULT_HEADERS,
session_regex=SESSION_REGEX):
self.app = app
self.postbacks = postbacks
self.forms = {}
self.history = []
self.cookies = {}
self.default_headers = default_headers
self.sessions = {}
self.session_regex = session_regex and re.compile(session_regex)
def get(self, url, cookies=None, headers=None, auth=None):
        return self.post(url, data=None, cookies=cookies, headers=headers,
                         auth=auth)
def post(self, url, data=None, cookies=None, headers=None, auth=None):
self.url = self.app + url
# if this POST form requires a postback do it
if data and '_formname' in data and self.postbacks and \
self.history and self.history[-1][1] != self.url:
            # to bypass web2py CSRF protection we need to fetch the formkey
            # before submitting the form
self.get(url, cookies=cookies, headers=headers, auth=auth)
# unless cookies are specified, recycle cookies
if cookies is None:
cookies = self.cookies
cookies = cookies or {}
headers = headers or {}
# if required do basic auth
if auth:
auth_handler = urllib2.HTTPBasicAuthHandler()
auth_handler.add_password(**auth)
opener = urllib2.build_opener(auth_handler)
else:
opener = urllib2.build_opener()
# copy headers from dict to list of key,value
headers_list = []
for key, value in self.default_headers.iteritems():
if not key in headers:
headers[key] = value
for key, value in headers.iteritems():
if isinstance(value, (list, tuple)):
for v in value:
headers_list.append((key, v))
else:
headers_list.append((key, value))
# move cookies to headers
for key, value in cookies.iteritems():
headers_list.append(('Cookie', '%s=%s' % (key, value)))
# add headers to request
for key, value in headers_list:
opener.addheaders.append((key, str(value)))
# assume everything is ok and make http request
error = None
try:
if data is not None:
self.method = 'POST'
# if there is only one form, set _formname automatically
if not '_formname' in data and len(self.forms) == 1:
data['_formname'] = self.forms.keys()[0]
# if there is no formkey but it is known, set it
if '_formname' in data and not '_formkey' in data and \
data['_formname'] in self.forms:
data['_formkey'] = self.forms[data['_formname']]
# time the POST request
data = urllib.urlencode(data)
t0 = time.time()
self.response = opener.open(self.url, data)
self.time = time.time() - t0
else:
self.method = 'GET'
# time the GET request
t0 = time.time()
self.response = opener.open(self.url)
self.time = time.time() - t0
except urllib2.HTTPError, error:
# catch HTTP errors
self.time = time.time() - t0
self.response = error
if hasattr(self.response, 'getcode'):
self.status = self.response.getcode()
else:#python2.5
self.status = None
self.text = self.response.read()
self.headers = dict(self.response.headers)
# treat web2py tickets as special types of errors
if error is not None:
if 'web2py_error' in self.headers:
raise RuntimeError(self.headers['web2py_error'])
else:
raise error
# parse headers into cookies
self.cookies = {}
if 'set-cookie' in self.headers:
for item in self.headers['set-cookie'].split(','):
                key, value = item[:item.find(';')].split('=', 1)
self.cookies[key.strip()] = value.strip()
        # check if a new session id has been issued, a symptom of a broken session
if self.session_regex is not None:
for cookie, value in self.cookies.iteritems():
match = self.session_regex.match(cookie)
if match:
name = match.group('name')
if name in self.sessions and self.sessions[name] != value:
                        raise RuntimeError('Broken session %s' % name)
self.sessions[name] = value
# find all forms and formkeys in page
self.forms = {}
for match in FORM_REGEX.finditer(self.text):
self.forms[match.group('formname')] = match.group('formkey')
# log this request
self.history.append((self.method, self.url, self.status, self.time))
def test_web2py_registration_and_login():
# from gluon.contrib.webclient import WebClient
# start a web2py instance for testing
client = WebClient('http://127.0.0.1:8000/welcome/default/')
client.get('index')
# register
data = dict(first_name='Homer',
last_name='Simpson',
email='homer@web2py.com',
password='test',
password_two='test',
_formname='register')
client.post('user/register', data=data)
# logout
client.get('user/logout')
# login
data = dict(email='homer@web2py.com',
password='test',
_formname='login')
client.post('user/login', data=data)
# check registration and login were successful
client.get('user/profile')
assert 'Welcome Homer' in client.text
# print some variables
print '\nsessions:\n', client.sessions
print '\nheaders:\n', client.headers
print '\ncookies:\n', client.cookies
print '\nforms:\n', client.forms
print
for method, url, status, t in client.history:
print method, url, status, t
if __name__ == '__main__':
test_web2py_registration_and_login()
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Creates a taskbar icon for web2py
# Author: Mark Larsen, mostly stolen from Mark Hammond's
#   C:\Python25\Lib\site-packages\win32\Demos\win32gui_taskbar.py
# 11/7/08
# dual licensed under the web2py license (LGPL) and the Python license.
import os
import sys
import base64
import win32con
import win32api
import win32gui
class TaskBarIcon:
def __init__(self, iconPath=None):
self.iconPath = iconPath
self.status = []
msg_TaskbarRestart = \
win32api.RegisterWindowMessage('TaskbarCreated')
message_map = {
msg_TaskbarRestart: self.OnRestart,
win32con.WM_DESTROY: self.OnDestroy,
win32con.WM_COMMAND: self.OnCommand,
win32con.WM_USER + 20: self.OnTaskbarNotify,
}
# Register the Window class.
wc = win32gui.WNDCLASS()
hinst = wc.hInstance = win32api.GetModuleHandle(None)
wc.lpszClassName = 'web2pyTaskbar'
wc.style = win32con.CS_VREDRAW | win32con.CS_HREDRAW
wc.hCursor = win32gui.LoadCursor(0, win32con.IDC_ARROW)
wc.hbrBackground = win32con.COLOR_WINDOW
wc.lpfnWndProc = message_map # could also specify a wndproc.
classAtom = win32gui.RegisterClass(wc)
# Create the Window.
style = win32con.WS_OVERLAPPED | win32con.WS_SYSMENU
self.hwnd = win32gui.CreateWindow(
classAtom,
'web2pyTaskbar',
style,
0,
0,
win32con.CW_USEDEFAULT,
win32con.CW_USEDEFAULT,
0,
0,
hinst,
None,
)
win32gui.UpdateWindow(self.hwnd)
self.SetServerStopped()
def __createIcon(self):
# try and use custom icon
if self.iconPath and os.path.isfile(self.iconPath):
hicon = self.__loadFromFile(self.iconPath)
else:
try:
fp = 'tmp.ico'
icFH = file(fp, 'wb')
if self.serverState == self.EnumServerState.STOPPED:
icFH.write(base64.b64decode(self.__getIconStopped()))
elif self.serverState == self.EnumServerState.RUNNING:
icFH.write(base64.b64decode(self.__getIconRunning()))
icFH.close()
hicon = self.__loadFromFile(fp)
os.unlink(fp)
except:
print "Can't load web2py icons - using default"
hicon = win32gui.LoadIcon(0, win32con.IDI_APPLICATION)
flags = win32gui.NIF_ICON | win32gui.NIF_MESSAGE\
| win32gui.NIF_TIP
nid = (
self.hwnd,
0,
flags,
win32con.WM_USER + 20,
hicon,
'web2py Framework',
)
try:
win32gui.Shell_NotifyIcon(win32gui.NIM_MODIFY, nid)
except:
try:
win32gui.Shell_NotifyIcon(win32gui.NIM_ADD, nid)
except win32api.error:
# This is common when windows is starting, and this code is hit
# before the taskbar has been created.
print 'Failed to add the taskbar icon - is explorer running?'
                # but keep running anyway - when explorer starts, we get the
                # TaskbarCreated message and OnRestart re-creates the icon
def OnRestart(
self,
hwnd,
msg,
wparam,
lparam,
):
        self.__createIcon()
def OnDestroy(
self,
hwnd,
msg,
wparam,
lparam,
):
nid = (self.hwnd, 0)
win32gui.Shell_NotifyIcon(win32gui.NIM_DELETE, nid)
def OnTaskbarNotify(
self,
hwnd,
msg,
wparam,
lparam,
):
if lparam == win32con.WM_LBUTTONUP:
pass
elif lparam == win32con.WM_LBUTTONDBLCLK:
pass
elif lparam == win32con.WM_RBUTTONUP:
menu = win32gui.CreatePopupMenu()
win32gui.AppendMenu(menu, win32con.MF_STRING, 1023,
'Toggle Display')
win32gui.AppendMenu(menu, win32con.MF_SEPARATOR, 0, '')
if self.serverState == self.EnumServerState.STOPPED:
win32gui.AppendMenu(menu, win32con.MF_STRING, 1024,
'Start Server')
win32gui.AppendMenu(menu, win32con.MF_STRING
| win32con.MF_GRAYED, 1025,
'Restart Server')
win32gui.AppendMenu(menu, win32con.MF_STRING
| win32con.MF_GRAYED, 1026,
'Stop Server')
else:
win32gui.AppendMenu(menu, win32con.MF_STRING
| win32con.MF_GRAYED, 1024,
'Start Server')
win32gui.AppendMenu(menu, win32con.MF_STRING, 1025,
'Restart Server')
win32gui.AppendMenu(menu, win32con.MF_STRING, 1026,
'Stop Server')
win32gui.AppendMenu(menu, win32con.MF_SEPARATOR, 0, '')
win32gui.AppendMenu(menu, win32con.MF_STRING, 1027,
'Quit (pid:%i)' % os.getpid())
pos = win32gui.GetCursorPos()
# See http://msdn.microsoft.com/library/default.asp?url=/library/en-us/winui/menus_0hdi.asp
win32gui.SetForegroundWindow(self.hwnd)
win32gui.TrackPopupMenu(
menu,
win32con.TPM_LEFTALIGN,
pos[0],
pos[1],
0,
self.hwnd,
None,
)
win32api.PostMessage(self.hwnd, win32con.WM_NULL, 0, 0)
return 1
def OnCommand(
self,
hwnd,
msg,
wparam,
lparam,
):
id = win32api.LOWORD(wparam)
if id == 1023:
self.status.append(self.EnumStatus.TOGGLE)
elif id == 1024:
self.status.append(self.EnumStatus.START)
elif id == 1025:
self.status.append(self.EnumStatus.RESTART)
elif id == 1026:
self.status.append(self.EnumStatus.STOP)
elif id == 1027:
self.status.append(self.EnumStatus.QUIT)
self.Destroy()
else:
print 'Unknown command -', id
def Destroy(self):
win32gui.DestroyWindow(self.hwnd)
def SetServerRunning(self):
self.serverState = self.EnumServerState.RUNNING
self.__createIcon()
def SetServerStopped(self):
self.serverState = self.EnumServerState.STOPPED
self.__createIcon()
def __getIconRunning(self):
return 'AAABAAEAEBAQAAAAAAAoAQAAFgAAACgAAAAQAAAAIAAAAAEABAAAAAAAgAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAIXMGAABe/wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABERAgAAIAAAEAACAAAgAAABEAIiACIgAAABAgAgIAIAEAECACAgAgABEAIiACACAAAAAAAAAAAAICACIiAiIAICAgIAACACAgICAgAAIAICAgICIiAiIAICAgIAACACAgICAgAAIAICAgICIiAiIAAAAAAAAAAAD//wAAhe8AAL3vAADMYwAA9a0AALWtAADMbQAA//8AAKwjAABV7QAAVe0AAFQjAABV7QAAVe0AAFQjAAD//wAA'
def __getIconStopped(self):
return 'AAABAAEAEBAQAAEABAAoAQAAFgAAACgAAAAQAAAAIAAAAAEABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAJCdIAIXMGAABe/wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAzMzMzMzMzAwERMjMzIzAzEDMyMzMjAzMxAzIiMyAjMzMwMjMjAzIzEzECMyAjMjMxEzAiAyMyMzMzMwAzMzMzIyMyACMiIzIyMjAzAyMyMjIyAjMwIzIyMjAyIiMCIzIyAjIzMyAyMjAyMjMzIwIyAjIyIiMiIDAzMzMzMzMzB//gAAhe0AAJ3rAADMYwAA9a0AALGNAADMLQAA/n8AAKwjAABVrQAAUc0AAFQjAABF5QAAVekAABQhAAB//gAA'
def __loadFromFile(self, iconPath):
hinst = win32api.GetModuleHandle(None)
icon_flags = win32con.LR_LOADFROMFILE | win32con.LR_DEFAULTSIZE
hicon = win32gui.LoadImage(
hinst,
iconPath,
win32con.IMAGE_ICON,
0,
0,
icon_flags,
)
return hicon
class EnumStatus:
TOGGLE = 0
START = 1
STOP = 2
RESTART = 3
QUIT = 4
class EnumServerState:
RUNNING = 0
STOPPED = 1
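# Usage sketch (added; the polling loop is an assumption about how the
# web2py launcher consumes this class). The icon reports user actions by
# appending EnumStatus values to its .status list, which the caller polls
# while pumping Windows messages:
#
#     tbi = TaskBarIcon()
#     tbi.SetServerRunning()
#     while True:
#         win32gui.PumpWaitingMessages()
#         while tbi.status:
#             if tbi.status.pop(0) == TaskBarIcon.EnumStatus.QUIT:
#                 raise SystemExit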
| Python |
"""
AIM class to credit card payment with authorize.net
Fork of authnet code written by John Conde
http://www.johnconde.net/blog/integrate-the-authorizenet-aim-api-with-python-3-2/
Unknown license, assuming public domain
Modified by Massimo Di Pierro
- ported from Python 3.x to run on Python 2.4+
- fixed a couple of bugs
- merged with test so single file
- namedtuple from http://code.activestate.com/recipes/500261/
"""
__all__ = ['AIM']
from operator import itemgetter
import urllib
_known_tuple_types = {}
class NamedTupleBase(tuple):
"""Base class for named tuples with the __new__ operator set, named tuples
yielded by the namedtuple() function will subclass this and add
properties."""
def __new__(cls, *args, **kws):
"""Create a new instance of this fielded tuple"""
# May need to unpack named field values here
if kws:
values = list(args) + [None] * (len(cls._fields) - len(args))
fields = dict((val, idx) for idx, val in enumerate(cls._fields))
for kw, val in kws.iteritems():
                assert kw in fields, "%r not in field list" % kw
values[fields[kw]] = val
args = tuple(values)
return tuple.__new__(cls, args)
def namedtuple(typename, fieldnames):
"""
    >>> tpl = namedtuple('tpl', ['a', 'b', 'c'])
>>> tpl(1, 2, 3)
(1, 2, 3)
>>> tpl(1, 2, 3).b
2
>>> tpl(c=1, a=2, b=3)
(2, 3, 1)
>>> tpl(c=1, a=2, b=3).b
3
>>> tpl(c='pads with nones')
(None, None, 'pads with nones')
>>> tpl(b='pads with nones')
(None, 'pads with nones', None)
>>>
"""
# Split up a string, some people do this
if isinstance(fieldnames, basestring):
fieldnames = fieldnames.replace(',', ' ').split()
# Convert anything iterable that enumerates fields to a tuple now
fieldname_tuple = tuple(str(field) for field in fieldnames)
# See if we've cached this
if fieldname_tuple in _known_tuple_types:
return _known_tuple_types[fieldname_tuple]
# Make the type
new_tuple_type = type(typename, (NamedTupleBase,), {})
# Set the hidden field
new_tuple_type._fields = fieldname_tuple
# Add the getters
for i, field in enumerate(fieldname_tuple):
setattr(new_tuple_type, field, property(itemgetter(i)))
# Cache
_known_tuple_types[fieldname_tuple] = new_tuple_type
# Done
return new_tuple_type
class AIM:
class AIMError(Exception):
def __init__(self, value):
self.parameter = value
def __str__(self):
return str(self.parameter)
def __init__(self, login, transkey, testmode=False):
if str(login).strip() == '' or login is None:
raise AIM.AIMError('No login name provided')
if str(transkey).strip() == '' or transkey is None:
raise AIM.AIMError('No transaction key provided')
        if testmode not in (True, False):
raise AIM.AIMError('Invalid value for testmode. Must be True or False. "{0}" given.'.format(testmode))
self.testmode = testmode
self.proxy = None
self.delimiter = '|'
self.results = []
self.error = True
self.success = False
self.declined = False
self.parameters = {}
self.setParameter('x_delim_data', 'true')
self.setParameter('x_delim_char', self.delimiter)
self.setParameter('x_relay_response', 'FALSE')
self.setParameter('x_url', 'FALSE')
self.setParameter('x_version', '3.1')
self.setParameter('x_method', 'CC')
self.setParameter('x_type', 'AUTH_CAPTURE')
self.setParameter('x_login', login)
self.setParameter('x_tran_key', transkey)
def process(self):
encoded_args = urllib.urlencode(self.parameters)
if self.testmode == True:
url = 'https://test.authorize.net/gateway/transact.dll'
else:
url = 'https://secure.authorize.net/gateway/transact.dll'
if self.proxy is None:
self.results += str(urllib.urlopen(
url, encoded_args).read()).split(self.delimiter)
else:
opener = urllib.FancyURLopener(self.proxy)
opened = opener.open(url, encoded_args)
try:
self.results += str(opened.read()).split(self.delimiter)
finally:
opened.close()
Results = namedtuple('Results', 'ResultResponse ResponseSubcode ResponseCode ResponseText AuthCode \
AVSResponse TransactionID InvoiceNumber Description Amount PaymentMethod \
TransactionType CustomerID CHFirstName CHLastName Company BillingAddress \
BillingCity BillingState BillingZip BillingCountry Phone Fax Email ShippingFirstName \
ShippingLastName ShippingCompany ShippingAddress ShippingCity ShippingState \
ShippingZip ShippingCountry TaxAmount DutyAmount FreightAmount TaxExemptFlag \
PONumber MD5Hash CVVResponse CAVVResponse')
self.response = Results(*tuple(r for r in self.results)[0:40])
if self.getResultResponseFull() == 'Approved':
self.error = False
self.success = True
self.declined = False
elif self.getResultResponseFull() == 'Declined':
self.error = False
self.success = False
self.declined = True
else:
raise AIM.AIMError(self.response.ResponseText)
def setTransaction(self, creditcard, expiration, total, cvv=None, tax=None, invoice=None):
if str(creditcard).strip() == '' or creditcard is None:
raise AIM.AIMError('No credit card number passed to setTransaction(): {0}'.format(creditcard))
if str(expiration).strip() == '' or expiration is None:
            raise AIM.AIMError('No expiration date passed to setTransaction(): {0}'.format(expiration))
if str(total).strip() == '' or total is None:
raise AIM.AIMError('No total amount passed to setTransaction(): {0}'.format(total))
self.setParameter('x_card_num', creditcard)
self.setParameter('x_exp_date', expiration)
self.setParameter('x_amount', total)
if cvv is not None:
self.setParameter('x_card_code', cvv)
if tax is not None:
self.setParameter('x_tax', tax)
if invoice is not None:
self.setParameter('x_invoice_num', invoice)
def setTransactionType(self, transtype=None):
types = ['AUTH_CAPTURE', 'AUTH_ONLY', 'PRIOR_AUTH_CAPTURE',
'CREDIT', 'CAPTURE_ONLY', 'VOID']
if transtype.upper() not in types:
raise AIM.AIMError('Incorrect Transaction Type passed to setTransactionType(): {0}'.format(transtype))
self.setParameter('x_type', transtype.upper())
def setProxy(self, proxy=None):
if str(proxy).strip() == '' or proxy is None:
raise AIM.AIMError('No proxy passed to setProxy()')
self.proxy = {'http': str(proxy).strip()}
def setParameter(self, key=None, value=None):
if key is not None and value is not None and str(key).strip() != '' and str(value).strip() != '':
self.parameters[key] = str(value).strip()
else:
raise AIM.AIMError('Incorrect parameters passed to setParameter(): {0}:{1}'.format(key, value))
def isApproved(self):
return self.success
def isDeclined(self):
return self.declined
def isError(self):
return self.error
def getResultResponseFull(self):
responses = ['', 'Approved', 'Declined', 'Error']
return responses[int(self.results[0])]
def process(creditcard, expiration, total, cvv=None, tax=None, invoice=None,
login='cnpdev4289', transkey='SR2P8g4jdEn7vFLQ', testmode=True):
payment = AIM(login, transkey, testmode)
expiration = expiration.replace('/', '')
payment.setTransaction(creditcard, expiration, total, cvv, tax, invoice)
try:
payment.process()
return payment.isApproved()
except AIM.AIMError:
return False
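# Usage sketch (added; reuses the Authorize.net developer-sandbox card and
# the default sandbox credentials hard-coded above):
#
#     approved = process('4427802641004797', '12/2012', '1.00', cvv='123')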
def test():
import socket
import sys
from time import time
creditcard = '4427802641004797'
expiration = '122012'
total = '1.00'
cvv = '123'
tax = '0.00'
invoice = str(time())[4:10] # get a random invoice number
try:
payment = AIM('cnpdev4289', 'SR2P8g4jdEn7vFLQ', True)
payment.setTransaction(
creditcard, expiration, total, cvv, tax, invoice)
payment.setParameter(
            'x_duplicate_window', 180)  # three-minute duplicate window
payment.setParameter('x_cust_id', '1324') # customer ID
payment.setParameter('x_first_name', 'John')
payment.setParameter('x_last_name', 'Conde')
payment.setParameter('x_company', 'Test Company')
payment.setParameter('x_address', '1234 Main Street')
payment.setParameter('x_city', 'Townsville')
payment.setParameter('x_state', 'NJ')
payment.setParameter('x_zip', '12345')
payment.setParameter('x_country', 'US')
payment.setParameter('x_phone', '800-555-1234')
payment.setParameter('x_description', 'Test Transaction')
payment.setParameter(
'x_customer_ip', socket.gethostbyname(socket.gethostname()))
payment.setParameter('x_email', 'john@example.com')
payment.setParameter('x_email_customer', False)
payment.process()
if payment.isApproved():
print 'Response Code: ', payment.response.ResponseCode
print 'Response Text: ', payment.response.ResponseText
print 'Response: ', payment.getResultResponseFull()
print 'Transaction ID: ', payment.response.TransactionID
print 'CVV Result: ', payment.response.CVVResponse
print 'Approval Code: ', payment.response.AuthCode
print 'AVS Result: ', payment.response.AVSResponse
elif payment.isDeclined():
print 'Your credit card was declined by your bank'
elif payment.isError():
raise AIM.AIMError('An uncaught error occurred')
except AIM.AIMError, e:
print "Exception thrown:", e
        print 'An error occurred'
print 'approved', payment.isApproved()
print 'declined', payment.isDeclined()
print 'error', payment.isError()
if __name__ == '__main__':
test()
| Python |
"""
PyRSS2Gen - A Python library for generating RSS 2.0 feeds.
(This is the BSD license, based on the template at
http://www.opensource.org/licenses/bsd-license.php )
Copyright (c) 2003, Dalke Scientific Software, LLC
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
    * Neither the name of the Dalke Scientific Software, LLC, Andrew
Dalke, nor the names of its contributors may be used to endorse or
promote products derived from this software without specific prior
written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
__name__ = "PyRSS2Gen"
__version__ = (1, 1, 0)
__author__ = "Andrew Dalke <dalke@dalkescientific.com>"
_generator_name = __name__ + "-" + ".".join(map(str, __version__))
import datetime
import sys
if sys.version_info[0] == 3:
# Python 3
basestring = str
from io import StringIO
else:
# Python 2
try:
from cStringIO import StringIO
except ImportError:
# Very old (or memory constrained) systems might
# have left out the compiled C version. Fall back
# to the pure Python one. Haven't seen this sort
# of system since the early 2000s.
from StringIO import StringIO
# Could make this the base class; will need to add 'publish'
class WriteXmlMixin:
def write_xml(self, outfile, encoding="iso-8859-1"):
from xml.sax import saxutils
handler = saxutils.XMLGenerator(outfile, encoding)
handler.startDocument()
self.publish(handler)
handler.endDocument()
def to_xml(self, encoding="iso-8859-1"):
f = StringIO()
self.write_xml(f, encoding)
return f.getvalue()
def _element(handler, name, obj, d={}):
if isinstance(obj, basestring) or obj is None:
# special-case handling to make the API easier
# to use for the common case.
handler.startElement(name, d)
if obj is not None:
handler.characters(obj)
handler.endElement(name)
else:
# It better know how to emit the correct XML.
obj.publish(handler)
def _opt_element(handler, name, obj):
if obj is None:
return
_element(handler, name, obj)
def _format_date(dt):
"""convert a datetime into an RFC 822 formatted date
Input date must be in GMT.
"""
# Looks like:
# Sat, 07 Sep 2002 00:00:01 GMT
# Can't use strftime because that's locale dependent
#
# Isn't there a standard way to do this for Python? The
# rfc822 and email.Utils modules assume a timestamp. The
# following is based on the rfc822 module.
return "%s, %02d %s %04d %02d:%02d:%02d GMT" % (
["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"][dt.weekday()],
dt.day,
["Jan", "Feb", "Mar", "Apr", "May", "Jun",
"Jul", "Aug", "Sep", "Oct", "Nov", "Dec"][dt.month - 1],
dt.year, dt.hour, dt.minute, dt.second)
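# Sanity sketch (added): the example date from the comment above renders as
# expected (2002-09-07 was a Saturday).
assert _format_date(datetime.datetime(2002, 9, 7, 0, 0, 1)) == \
    "Sat, 07 Sep 2002 00:00:01 GMT"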
##
# A couple simple wrapper objects for the fields which
# take a simple value other than a string.
class IntElement:
"""implements the 'publish' API for integers
Takes the tag name and the integer value to publish.
(Could be used for anything which uses str() to be published
to text for XML.)
"""
element_attrs = {}
def __init__(self, name, val):
self.name = name
self.val = val
def publish(self, handler):
handler.startElement(self.name, self.element_attrs)
handler.characters(str(self.val))
handler.endElement(self.name)
class DateElement:
"""implements the 'publish' API for a datetime.datetime
Takes the tag name and the datetime to publish.
Converts the datetime to RFC 2822 timestamp (4-digit year).
"""
def __init__(self, name, dt):
self.name = name
self.dt = dt
def publish(self, handler):
_element(handler, self.name, _format_date(self.dt))
####
class Category:
"""Publish a category element"""
def __init__(self, category, domain=None):
self.category = category
self.domain = domain
def publish(self, handler):
d = {}
if self.domain is not None:
d["domain"] = self.domain
_element(handler, "category", self.category, d)
class Cloud:
"""Publish a cloud"""
def __init__(self, domain, port, path,
registerProcedure, protocol):
self.domain = domain
self.port = port
self.path = path
self.registerProcedure = registerProcedure
self.protocol = protocol
def publish(self, handler):
_element(handler, "cloud", None, {
"domain": self.domain,
"port": str(self.port),
"path": self.path,
"registerProcedure": self.registerProcedure,
"protocol": self.protocol})
class Image:
"""Publish a channel Image"""
element_attrs = {}
def __init__(self, url, title, link,
width=None, height=None, description=None):
self.url = url
self.title = title
self.link = link
self.width = width
self.height = height
self.description = description
def publish(self, handler):
handler.startElement("image", self.element_attrs)
_element(handler, "url", self.url)
_element(handler, "title", self.title)
_element(handler, "link", self.link)
width = self.width
if isinstance(width, int):
width = IntElement("width", width)
_opt_element(handler, "width", width)
height = self.height
if isinstance(height, int):
height = IntElement("height", height)
_opt_element(handler, "height", height)
_opt_element(handler, "description", self.description)
handler.endElement("image")
class Guid:
"""Publish a guid
Defaults to being a permalink, which is the assumption if it's
omitted. Hence strings are always permalinks.
"""
def __init__(self, guid, isPermaLink=1):
self.guid = guid
self.isPermaLink = isPermaLink
def publish(self, handler):
d = {}
if self.isPermaLink:
d["isPermaLink"] = "true"
else:
d["isPermaLink"] = "false"
_element(handler, "guid", self.guid, d)
class TextInput:
"""Publish a textInput
Apparently this is rarely used.
"""
element_attrs = {}
def __init__(self, title, description, name, link):
self.title = title
self.description = description
self.name = name
self.link = link
def publish(self, handler):
handler.startElement("textInput", self.element_attrs)
_element(handler, "title", self.title)
_element(handler, "description", self.description)
_element(handler, "name", self.name)
_element(handler, "link", self.link)
handler.endElement("textInput")
class Enclosure:
"""Publish an enclosure"""
def __init__(self, url, length, type):
self.url = url
self.length = length
self.type = type
def publish(self, handler):
_element(handler, "enclosure", None,
{"url": self.url,
"length": str(self.length),
"type": self.type,
})
class Source:
"""Publish the item's original source, used by aggregators"""
def __init__(self, name, url):
self.name = name
self.url = url
def publish(self, handler):
_element(handler, "source", self.name, {"url": self.url})
class SkipHours:
"""Publish the skipHours
This takes a list of hours, as integers.
"""
element_attrs = {}
def __init__(self, hours):
self.hours = hours
def publish(self, handler):
if self.hours:
handler.startElement("skipHours", self.element_attrs)
for hour in self.hours:
_element(handler, "hour", str(hour))
handler.endElement("skipHours")
class SkipDays:
"""Publish the skipDays
This takes a list of days as strings.
"""
element_attrs = {}
def __init__(self, days):
self.days = days
def publish(self, handler):
if self.days:
handler.startElement("skipDays", self.element_attrs)
for day in self.days:
_element(handler, "day", day)
handler.endElement("skipDays")
class RSS2(WriteXmlMixin):
"""The main RSS class.
Stores the channel attributes, with the "category" elements under
".categories" and the RSS items under ".items".
"""
rss_attrs = {"version": "2.0"}
element_attrs = {}
def __init__(self,
title,
link,
description,
language=None,
copyright=None,
managingEditor=None,
webMaster=None,
pubDate=None, # a datetime, *in* *GMT*
lastBuildDate=None, # a datetime
categories=None, # list of strings or Category
generator=_generator_name,
docs="http://blogs.law.harvard.edu/tech/rss",
cloud=None, # a Cloud
ttl=None, # integer number of minutes
image=None, # an Image
rating=None, # a string; I don't know how it's used
textInput=None, # a TextInput
skipHours=None, # a SkipHours with a list of integers
skipDays=None, # a SkipDays with a list of strings
items=None, # list of RSSItems
):
self.title = title
self.link = link
self.description = description
self.language = language
self.copyright = copyright
self.managingEditor = managingEditor
self.webMaster = webMaster
self.pubDate = pubDate
self.lastBuildDate = lastBuildDate
if categories is None:
categories = []
self.categories = categories
self.generator = generator
self.docs = docs
self.cloud = cloud
self.ttl = ttl
self.image = image
self.rating = rating
self.textInput = textInput
self.skipHours = skipHours
self.skipDays = skipDays
if items is None:
items = []
self.items = items
def publish(self, handler):
handler.startElement("rss", self.rss_attrs)
handler.startElement("channel", self.element_attrs)
_element(handler, "title", self.title)
_element(handler, "link", self.link)
_element(handler, "description", self.description)
self.publish_extensions(handler)
_opt_element(handler, "language", self.language)
_opt_element(handler, "copyright", self.copyright)
_opt_element(handler, "managingEditor", self.managingEditor)
_opt_element(handler, "webMaster", self.webMaster)
pubDate = self.pubDate
if isinstance(pubDate, datetime.datetime):
pubDate = DateElement("pubDate", pubDate)
_opt_element(handler, "pubDate", pubDate)
lastBuildDate = self.lastBuildDate
if isinstance(lastBuildDate, datetime.datetime):
lastBuildDate = DateElement("lastBuildDate", lastBuildDate)
_opt_element(handler, "lastBuildDate", lastBuildDate)
for category in self.categories:
if isinstance(category, basestring):
category = Category(category)
category.publish(handler)
_opt_element(handler, "generator", self.generator)
_opt_element(handler, "docs", self.docs)
if self.cloud is not None:
self.cloud.publish(handler)
ttl = self.ttl
if isinstance(self.ttl, int):
ttl = IntElement("ttl", ttl)
_opt_element(handler, "ttl", ttl)
if self.image is not None:
self.image.publish(handler)
_opt_element(handler, "rating", self.rating)
if self.textInput is not None:
self.textInput.publish(handler)
if self.skipHours is not None:
self.skipHours.publish(handler)
if self.skipDays is not None:
self.skipDays.publish(handler)
for item in self.items:
item.publish(handler)
handler.endElement("channel")
handler.endElement("rss")
def publish_extensions(self, handler):
# Derived classes can hook into this to insert
# output after the three required fields.
pass
class RSSItem(WriteXmlMixin):
"""Publish an RSS Item"""
element_attrs = {}
def __init__(self,
title=None, # string
link=None, # url as string
description=None, # string
author=None, # email address as string
categories=None, # list of string or Category
comments=None, # url as string
enclosure=None, # an Enclosure
guid=None, # a unique string
pubDate=None, # a datetime
source=None, # a Source
):
if title is None and description is None:
raise TypeError(
"must define at least one of 'title' or 'description'")
self.title = title
self.link = link
self.description = description
self.author = author
if categories is None:
categories = []
self.categories = categories
self.comments = comments
self.enclosure = enclosure
self.guid = guid
self.pubDate = pubDate
self.source = source
# It sure does get tedious typing these names three times...
def publish(self, handler):
handler.startElement("item", self.element_attrs)
_opt_element(handler, "title", self.title)
_opt_element(handler, "link", self.link)
self.publish_extensions(handler)
_opt_element(handler, "description", self.description)
_opt_element(handler, "author", self.author)
for category in self.categories:
if isinstance(category, basestring):
category = Category(category)
category.publish(handler)
_opt_element(handler, "comments", self.comments)
if self.enclosure is not None:
self.enclosure.publish(handler)
_opt_element(handler, "guid", self.guid)
pubDate = self.pubDate
if isinstance(pubDate, datetime.datetime):
pubDate = DateElement("pubDate", pubDate)
_opt_element(handler, "pubDate", pubDate)
if self.source is not None:
self.source.publish(handler)
handler.endElement("item")
def publish_extensions(self, handler):
# Derived classes can hook into this to insert
# output after the title and link elements
pass
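# Usage sketch (added for illustration; the feed contents are placeholders):
# build a one-item feed with the classes above and serialize it to XML.
def _demo_feed():
    rss = RSS2(
        title="Sample Feed",
        link="http://example.com/",
        description="A minimal PyRSS2Gen demonstration",
        lastBuildDate=datetime.datetime(2009, 1, 1),
        items=[RSSItem(
            title="First post",
            link="http://example.com/posts/1",
            description="Hello, world",
            guid=Guid("http://example.com/posts/1"),
            pubDate=datetime.datetime(2009, 1, 1))])
    return rss.to_xml(encoding="utf-8")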
| Python |
#!/usr/bin/env python
# -*- coding: latin-1 -*-
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation; either version 3, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
"Pythonic simple SOAP Client implementation"
__author__ = "Mariano Reingart (reingart@gmail.com)"
__copyright__ = "Copyright (C) 2008 Mariano Reingart"
__license__ = "LGPL 3.0"
TIMEOUT = 60
import os
import cPickle as pickle
import urllib2
from urlparse import urlparse
import tempfile
from simplexml import SimpleXMLElement, TYPE_MAP, OrderedDict
import logging
log = logging.getLogger(__name__)
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.WARNING)
#
# We store metadata about what available transport mechanisms we have available.
#
_http_connectors = {} # libname: classimpl mapping
_http_facilities = {} # functionalitylabel: [sequence of libname] mapping
class TransportBase:
@classmethod
def supports_feature(cls, feature_name):
return cls._wrapper_name in _http_facilities[feature_name]
#
# httplib2 support.
#
try:
import httplib2
except ImportError:
    TIMEOUT = None  # timeout is not supported by the urllib2 fallback
else:
class Httplib2Transport(httplib2.Http, TransportBase):
_wrapper_version = "httplib2 %s" % httplib2.__version__
_wrapper_name = 'httplib2'
def __init__(self, timeout, proxy=None, cacert=None, sessions=False):
##httplib2.debuglevel=4
kwargs = {}
if proxy:
import socks
kwargs['proxy_info'] = httplib2.ProxyInfo(proxy_type=socks.PROXY_TYPE_HTTP, **proxy)
print "using proxy", proxy
# set optional parameters according supported httplib2 version
if httplib2.__version__ >= '0.3.0':
kwargs['timeout'] = timeout
if httplib2.__version__ >= '0.7.0':
kwargs['disable_ssl_certificate_validation'] = cacert is None
kwargs['ca_certs'] = cacert
httplib2.Http.__init__(self, **kwargs)
_http_connectors['httplib2'] = Httplib2Transport
_http_facilities.setdefault('proxy', []).append('httplib2')
_http_facilities.setdefault('cacert', []).append('httplib2')
import inspect
if 'timeout' in inspect.getargspec(httplib2.Http.__init__)[0]:
_http_facilities.setdefault('timeout', []).append('httplib2')
#
# urllib2 support.
#
import urllib2
class urllib2Transport(TransportBase):
_wrapper_version = "urllib2 %s" % urllib2.__version__
_wrapper_name = 'urllib2'
def __init__(self, timeout=None, proxy=None, cacert=None, sessions=False):
import sys
if (timeout is not None) and not self.supports_feature('timeout'):
raise RuntimeError('timeout is not supported with urllib2 transport')
if proxy:
raise RuntimeError('proxy is not supported with urllib2 transport')
if cacert:
            raise RuntimeError('cacert is not supported with urllib2 transport')
self.request_opener = urllib2.urlopen
if sessions:
from cookielib import CookieJar
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(CookieJar()))
self.request_opener = opener.open
self._timeout = timeout
def request(self, url, method="GET", body=None, headers={}):
req = urllib2.Request(url, body, headers)
try:
f = self.request_opener(req, timeout=self._timeout)
except urllib2.HTTPError, f:
if f.code != 500:
raise
return f.info(), f.read()
_http_connectors['urllib2'] = urllib2Transport
_http_facilities.setdefault('sessions', []).append('urllib2')
import sys
if sys.version_info >= (2,6):
_http_facilities.setdefault('timeout', []).append('urllib2')
del sys
#
# pycurl support.
# experimental: pycurl seems faster + better proxy support (NTLM) + ssl features
#
try:
import pycurl
except ImportError:
pass
else:
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
class pycurlTransport(TransportBase):
_wrapper_version = pycurl.version
_wrapper_name = 'pycurl'
def __init__(self, timeout, proxy=None, cacert=None, sessions=False):
self.timeout = timeout
self.proxy = proxy or {}
self.cacert = cacert
def request(self, url, method, body, headers):
c = pycurl.Curl()
c.setopt(pycurl.URL, str(url))
if 'proxy_host' in self.proxy:
c.setopt(pycurl.PROXY, self.proxy['proxy_host'])
if 'proxy_port' in self.proxy:
c.setopt(pycurl.PROXYPORT, self.proxy['proxy_port'])
if 'proxy_user' in self.proxy:
c.setopt(pycurl.PROXYUSERPWD, "%(proxy_user)s:%(proxy_pass)s" % self.proxy)
self.buf = StringIO()
c.setopt(pycurl.WRITEFUNCTION, self.buf.write)
#c.setopt(pycurl.READFUNCTION, self.read)
#self.body = StringIO(body)
#c.setopt(pycurl.HEADERFUNCTION, self.header)
if self.cacert:
c.setopt(c.CAINFO, str(self.cacert))
c.setopt(pycurl.SSL_VERIFYPEER, self.cacert and 1 or 0)
c.setopt(pycurl.SSL_VERIFYHOST, self.cacert and 2 or 0)
c.setopt(pycurl.CONNECTTIMEOUT, self.timeout/6)
c.setopt(pycurl.TIMEOUT, self.timeout)
if method=='POST':
c.setopt(pycurl.POST, 1)
c.setopt(pycurl.POSTFIELDS, body)
if headers:
hdrs = ['%s: %s' % (str(k), str(v)) for k, v in headers.items()]
##print hdrs
c.setopt(pycurl.HTTPHEADER, hdrs)
c.perform()
##print "pycurl perform..."
c.close()
return {}, self.buf.getvalue()
_http_connectors['pycurl'] = pycurlTransport
_http_facilities.setdefault('proxy', []).append('pycurl')
_http_facilities.setdefault('cacert', []).append('pycurl')
_http_facilities.setdefault('timeout', []).append('pycurl')
class DummyTransport:
"Testing class to load a xml response"
def __init__(self, xml_response):
self.xml_response = xml_response
def request(self, location, method, body, headers):
print method, location
print headers
print body
return {}, self.xml_response
def get_http_wrapper(library=None, features=[]):
# If we are asked for a specific library, return it.
if library is not None:
try:
return _http_connectors[library]
except KeyError:
raise RuntimeError('%s transport is not available' % (library,))
# If we haven't been asked for a specific feature either, then just return our favourite
# implementation.
if not features:
return _http_connectors.get('httplib2', _http_connectors['urllib2'])
# If we are asked for a connector which supports the given features, then we will
# try that.
current_candidates = _http_connectors.keys()
new_candidates = []
for feature in features:
for candidate in current_candidates:
if candidate in _http_facilities.get(feature, []):
new_candidates.append(candidate)
current_candidates = new_candidates
new_candidates = []
# Return the first candidate in the list.
try:
candidate_name = current_candidates[0]
except IndexError:
raise RuntimeError("no transport available which supports these features: %s" % (features,))
else:
return _http_connectors[candidate_name]
def set_http_wrapper(library=None, features=[]):
"Set a suitable HTTP connection wrapper."
global Http
Http = get_http_wrapper(library, features)
return Http
def get_Http():
"Return current transport class"
global Http
return Http
# define the default HTTP connection class (it can be changed at runtime!):
set_http_wrapper()
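# Usage sketch (added; the URL and feature list are illustrative): callers
# can request a transport by name or by required features, e.g.
#
#     HttpClass = get_http_wrapper(features=['proxy', 'timeout'])
#     transport = HttpClass(TIMEOUT, proxy={'proxy_host': 'proxy.example.com',
#                                           'proxy_port': 8080})
#     headers, content = transport.request('http://example.com/ws',
#                                          'POST', '<xml/>', {})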
| Python |
#!/usr/bin/env python
# -*- coding: latin-1 -*-
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation; either version 3, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
"Simple SOAP Server implementation"
__author__ = "Mariano Reingart (reingart@gmail.com)"
__copyright__ = "Copyright (C) 2010 Mariano Reingart"
__license__ = "LGPL 3.0"
__version__ = "1.03c"
import logging
import re
import traceback
from simplexml import SimpleXMLElement, TYPE_MAP, Date, Decimal
log = logging.getLogger(__name__)
# Deprecated
DEBUG = False
NS_RX=re.compile(r'xmlns:(\w+)="(.+?)"')
class SoapDispatcher(object):
"Simple Dispatcher for SOAP Server"
def __init__(self, name, documentation='', action='', location='',
namespace=None, prefix=False,
soap_uri="http://schemas.xmlsoap.org/soap/envelope/",
soap_ns='soap',
namespaces={},
pretty=False,
debug=False,
**kwargs):
"""
:param namespace: Target namespace; xmlns=targetNamespace
:param prefix: Prefix for target namespace; xmlns:prefix=targetNamespace
:param namespaces: Specify additional namespaces; example: {'external': 'http://external.mt.moboperator'}
:param pretty: Prettifies generated xmls
:param debug: Use to add tracebacks in generated xmls.
Multiple namespaces
===================
It is possible to support multiple namespaces.
        You need to specify additional namespaces by passing the `namespaces` parameter.
>>> dispatcher = SoapDispatcher(
... name = "MTClientWS",
... location = "http://localhost:8008/ws/MTClientWS",
... action = 'http://localhost:8008/ws/MTClientWS', # SOAPAction
... namespace = "http://external.mt.moboperator", prefix="external",
... documentation = 'moboperator MTClientWS',
... namespaces = {
... 'external': 'http://external.mt.moboperator',
... 'model': 'http://model.common.mt.moboperator'
... },
... ns = True)
Now the registered method must return node names with namespaces' prefixes.
>>> def _multi_ns_func(self, serviceMsisdn):
... ret = {
... 'external:activateSubscriptionsReturn': [
... {'model:code': '0'},
... {'model:description': 'desc'},
... ]}
... return ret
Our prefixes will be changed to those used by the client.
"""
self.methods = {}
self.name = name
self.documentation = documentation
self.action = action # base SoapAction
self.location = location
self.namespace = namespace # targetNamespace
self.prefix = prefix
self.soap_ns = soap_ns
self.soap_uri = soap_uri
self.namespaces = namespaces
self.pretty = pretty
self.debug = debug
@staticmethod
def _extra_namespaces(xml, ns):
"""Extends xml with extra namespaces.
:param ns: dict with namespaceUrl:prefix pairs
:param xml: XML node to modify
"""
if ns:
_tpl = 'xmlns:%s="%s"'
_ns_str = " ".join([_tpl % (prefix, uri) for uri, prefix in ns.items() if uri not in xml])
xml = xml.replace('/>', ' '+_ns_str+'/>')
return xml
def register_function(self, name, fn, returns=None, args=None, doc=None):
self.methods[name] = fn, returns, args, doc or getattr(fn, "__doc__", "")
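    # Usage sketch (added; the 'Adder' service and its type dictionaries are
    # illustrative): returns/args map names to Python types so dispatch()
    # can unmarshall the request and marshall the response, e.g.
    #
    #     def adder(p, c):
    #         return {'ad': p + c}
    #     dispatcher.register_function('Adder', adder,
    #                                  returns={'ad': int},
    #                                  args={'p': int, 'c': int})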
def dispatch(self, xml, action=None):
"Receive and proccess SOAP call"
# default values:
prefix = self.prefix
ret = fault = None
soap_ns, soap_uri = self.soap_ns, self.soap_uri
soap_fault_code = 'VersionMismatch'
name = None
# namespaces = [('model', 'http://model.common.mt.moboperator'), ('external', 'http://external.mt.moboperator')]
_ns_reversed = dict(((v,k) for k,v in self.namespaces.iteritems())) # Switch keys-values
# _ns_reversed = {'http://external.mt.moboperator': 'external', 'http://model.common.mt.moboperator': 'model'}
try:
request = SimpleXMLElement(xml, namespace=self.namespace)
# detect soap prefix and uri (xmlns attributes of Envelope)
for k, v in request[:]:
if v in ("http://schemas.xmlsoap.org/soap/envelope/",
"http://www.w3.org/2003/05/soap-env",):
soap_ns = request.attributes()[k].localName
soap_uri = request.attributes()[k].value
# If the value from attributes on Envelope is in additional namespaces
elif v in self.namespaces.values():
_ns = request.attributes()[k].localName
_uri = request.attributes()[k].value
_ns_reversed[_uri] = _ns # update with received alias
# Now we change 'external' and 'model' to the received forms i.e. 'ext' and 'mod'
# After that we know how the client has prefixed additional namespaces
ns = NS_RX.findall(xml)
for k, v in ns:
if v in self.namespaces.values():
_ns_reversed[v] = k
soap_fault_code = 'Client'
# parse request message and get local method
method = request('Body', ns=soap_uri).children()(0)
if action:
# method name = action
name = action[len(self.action)+1:-1]
prefix = self.prefix
if not action or not name:
# method name = input message name
name = method.get_local_name()
prefix = method.get_prefix()
log.debug('dispatch method: %s', name)
function, returns_types, args_types, doc = self.methods[name]
log.debug('returns_types %s', returns_types)
# de-serialize parameters (if type definitions given)
if args_types:
args = method.children().unmarshall(args_types)
elif args_types is None:
args = {'request': method} # send raw request
else:
args = {} # no parameters
soap_fault_code = 'Server'
# execute function
ret = function(**args)
            log.debug('dispatched method returns: %s', ret)
except Exception: # This shouldn't be one huge try/except
import sys
etype, evalue, etb = sys.exc_info()
log.error(traceback.format_exc())
if self.debug:
detail = ''.join(traceback.format_exception(etype, evalue, etb))
detail += '\n\nXML REQUEST\n\n' + xml
else:
detail = None
fault = {'faultcode': "%s.%s" % (soap_fault_code, etype.__name__),
'faultstring': unicode(evalue),
'detail': detail}
# build response message
if not prefix:
xml = """<%(soap_ns)s:Envelope xmlns:%(soap_ns)s="%(soap_uri)s"/>"""
else:
xml = """<%(soap_ns)s:Envelope xmlns:%(soap_ns)s="%(soap_uri)s"
xmlns:%(prefix)s="%(namespace)s"/>"""
xml %= { # a %= {} is a shortcut for a = a % {}
'namespace': self.namespace,
'prefix': prefix,
'soap_ns': soap_ns,
'soap_uri': soap_uri
}
# Now we add extra namespaces
xml = SoapDispatcher._extra_namespaces(xml, _ns_reversed)
# Change our namespace alias to that given by the client.
# We take [('model', 'http://model.common.mt.moboperator'), ('external', 'http://external.mt.moboperator')]
# and mix it with {'http://external.mt.moboperator': 'ext', 'http://model.common.mt.moboperator': 'mod'}
mapping = dict(((k, _ns_reversed[v]) for k,v in self.namespaces.iteritems())) # Switch keys-values and change value
# and get {'model': u'mod', 'external': u'ext'}
response = SimpleXMLElement(xml,
namespace=self.namespace,
namespaces_map = mapping,
prefix=prefix)
response['xmlns:xsi'] = "http://www.w3.org/2001/XMLSchema-instance"
response['xmlns:xsd'] = "http://www.w3.org/2001/XMLSchema"
body = response.add_child("%s:Body" % soap_ns, ns=False)
if fault:
# generate a Soap Fault (with the python exception)
body.marshall("%s:Fault" % soap_ns, fault, ns=False)
else:
# return normal value
res = body.add_child("%sResponse" % name, ns=prefix)
if not prefix:
res['xmlns'] = self.namespace # add target namespace
# serialize returned values (response) if type definition available
if returns_types:
if not isinstance(ret, dict):
res.marshall(returns_types.keys()[0], ret)
else:
for k,v in ret.items():
res.marshall(k, v)
elif returns_types is None:
# merge xmlelement returned
res.import_node(ret)
elif returns_types == {}:
log.warning('Given returns_types is an empty dict.')
return response.as_xml(pretty=self.pretty)
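# Minimal usage sketch (hypothetical service; keyword arguments as in the
# __main__ demo at the bottom of this module):
# >>> dispatcher = SoapDispatcher(name="Sample",
# ...     location="http://localhost:8008/", action="http://localhost:8008/",
# ...     namespace="http://example.com/sample/", prefix="ns0")
# >>> dispatcher.register_function('Echo', lambda v: v,
# ...     returns={'v': str}, args={'v': str})
# >>> xml_response = dispatcher.dispatch(xml_request)  # xml_request: raw SOAP envelope string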
# Introspection functions:
def list_methods(self):
"Return a list of aregistered operations"
return [(method, doc) for method, (function, returns, args, doc) in self.methods.items()]
def help(self, method=None):
"Generate sample request and response messages"
(function, returns, args, doc) = self.methods[method]
xml = """
<soap:Envelope xmlns:soap="http://schemas.xmlsoap.org/soap/envelope/">
<soap:Body><%(method)s xmlns="%(namespace)s"/></soap:Body>
</soap:Envelope>""" % {'method':method, 'namespace':self.namespace}
request = SimpleXMLElement(xml, namespace=self.namespace, prefix=self.prefix)
if args:
items = args.items()
elif args is None:
items = [('value', None)]
else:
items = []
for k,v in items:
request(method).marshall(k, v, add_comments=True, ns=False)
xml = """
<soap:Envelope xmlns:soap="http://schemas.xmlsoap.org/soap/envelope/">
<soap:Body><%(method)sResponse xmlns="%(namespace)s"/></soap:Body>
</soap:Envelope>""" % {'method':method, 'namespace':self.namespace}
response = SimpleXMLElement(xml, namespace=self.namespace, prefix=self.prefix)
if returns:
items = returns.items()
elif returns is None:
items = [('value', None)]
else:
items = []
for k,v in items:
response('%sResponse'%method).marshall(k, v, add_comments=True, ns=False)
return request.as_xml(pretty=True), response.as_xml(pretty=True), doc
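# Illustrative: help() renders sample request/response envelopes for a
# registered method (hypothetical name):
# >>> req_xml, res_xml, doc = dispatcher.help('Echo')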
def wsdl(self):
"Generate Web Service Description v1.1"
xml = """<?xml version="1.0"?>
<wsdl:definitions name="%(name)s"
targetNamespace="%(namespace)s"
xmlns:tns="%(namespace)s"
xmlns:soap="http://schemas.xmlsoap.org/wsdl/soap/"
xmlns:wsdl="http://schemas.xmlsoap.org/wsdl/"
xmlns:xsd="http://www.w3.org/2001/XMLSchema">
<wsdl:documentation xmlns:wsdl="http://schemas.xmlsoap.org/wsdl/">%(documentation)s</wsdl:documentation>
<wsdl:types>
<xsd:schema targetNamespace="%(namespace)s"
elementFormDefault="qualified"
xmlns:xsd="http://www.w3.org/2001/XMLSchema">
</xsd:schema>
</wsdl:types>
</wsdl:definitions>
""" % {'namespace': self.namespace, 'name': self.name, 'documentation': self.documentation}
wsdl = SimpleXMLElement(xml)
for method, (function, returns, args, doc) in self.methods.items():
# create elements:
def parse_element(name, values, array=False, complex=False):
if not complex:
element = wsdl('wsdl:types')('xsd:schema').add_child('xsd:element')
complex = element.add_child("xsd:complexType")
else:
complex = wsdl('wsdl:types')('xsd:schema').add_child('xsd:complexType')
element = complex
element['name'] = name
if values:
items = values
elif values is None:
items = [('value', None)]
else:
items = []
if not array and items:
all = complex.add_child("xsd:all")
elif items:
all = complex.add_child("xsd:sequence")
for k,v in items:
e = all.add_child("xsd:element")
e['name'] = k
if array:
e[:]={'minOccurs': "0", 'maxOccurs': "unbounded"}
if v in TYPE_MAP.keys():
t='xsd:%s' % TYPE_MAP[v]
elif v is None:
t='xsd:anyType'
elif isinstance(v, list):
n="ArrayOf%s%s" % (name, k)
l = []
for d in v:
l.extend(d.items())
parse_element(n, l, array=True, complex=True)
t = "tns:%s" % n
elif isinstance(v, dict):
n="%s%s" % (name, k)
parse_element(n, v.items(), complex=True)
t = "tns:%s" % n
e.add_attribute('type', t)
parse_element("%s" % method, args and args.items())
parse_element("%sResponse" % method, returns and returns.items())
# create messages:
for m,e in ('Input',''), ('Output','Response'):
message = wsdl.add_child('wsdl:message')
message['name'] = "%s%s" % (method, m)
part = message.add_child("wsdl:part")
part[:] = {'name': 'parameters',
'element': 'tns:%s%s' % (method,e)}
# create ports
portType = wsdl.add_child('wsdl:portType')
portType['name'] = "%sPortType" % self.name
for method, (function, returns, args, doc) in self.methods.items():
op = portType.add_child('wsdl:operation')
op['name'] = method
if doc:
op.add_child("wsdl:documentation", doc)
input = op.add_child("wsdl:input")
input['message'] = "tns:%sInput" % method
output = op.add_child("wsdl:output")
output['message'] = "tns:%sOutput" % method
# create bindings
binding = wsdl.add_child('wsdl:binding')
binding['name'] = "%sBinding" % self.name
binding['type'] = "tns:%sPortType" % self.name
soapbinding = binding.add_child('soap:binding')
soapbinding['style'] = "document"
soapbinding['transport'] = "http://schemas.xmlsoap.org/soap/http"
for method in self.methods.keys():
op = binding.add_child('wsdl:operation')
op['name'] = method
soapop = op.add_child('soap:operation')
soapop['soapAction'] = self.action + method
soapop['style'] = 'document'
input = op.add_child("wsdl:input")
##input.add_attribute('name', "%sInput" % method)
soapbody = input.add_child("soap:body")
soapbody["use"] = "literal"
output = op.add_child("wsdl:output")
##output.add_attribute('name', "%sOutput" % method)
soapbody = output.add_child("soap:body")
soapbody["use"] = "literal"
service = wsdl.add_child('wsdl:service')
service["name"] = "%sService" % self.name
service.add_child('wsdl:documentation', text=self.documentation)
port=service.add_child('wsdl:port')
port["name"] = "%s" % self.name
port["binding"] = "tns:%sBinding" % self.name
soapaddress = port.add_child('soap:address')
soapaddress["location"] = self.location
return wsdl.as_xml(pretty=True)
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
class SOAPHandler(BaseHTTPRequestHandler):
def do_GET(self):
"User viewable help information and wsdl"
args = self.path[1:].split("?")
if self.path != "/" and args[0] not in self.server.dispatcher.methods.keys():
self.send_error(404, "Method not found: %s" % args[0])
else:
if self.path == "/":
# return wsdl if no method supplied
response = self.server.dispatcher.wsdl()
else:
# return supplied method help (?request or ?response messages)
req, res, doc = self.server.dispatcher.help(args[0])
if len(args)==1 or args[1]=="request":
response = req
else:
response = res
self.send_response(200)
self.send_header("Content-type", "text/xml")
self.end_headers()
self.wfile.write(response)
def do_POST(self):
"SOAP POST gateway"
self.send_response(200)
self.send_header("Content-type", "text/xml")
self.end_headers()
request = self.rfile.read(int(self.headers.getheader('content-length')))
response = self.server.dispatcher.dispatch(request)
self.wfile.write(response)
if __name__=="__main__":
import sys
dispatcher = SoapDispatcher(
name = "PySimpleSoapSample",
location = "http://localhost:8008/",
action = 'http://localhost:8008/', # SOAPAction
namespace = "http://example.com/pysimplesoapsamle/", prefix="ns0",
documentation = 'Example soap service using PySimpleSoap',
trace = True,
ns = True)
def adder(p,c, dt=None):
"Add several values"
print c[0]['d'],c[1]['d'],
import datetime
dt = dt + datetime.timedelta(365)
return {'ab': p['a']+p['b'], 'dd': c[0]['d']+c[1]['d'], 'dt': dt}
def dummy(in0):
"Just return input"
return in0
def echo(request):
"Copy request->response (generic, any type)"
return request.value
dispatcher.register_function('Adder', adder,
returns={'AddResult': {'ab': int, 'dd': str } },
args={'p': {'a': int,'b': int}, 'dt': Date, 'c': [{'d': Decimal}]})
dispatcher.register_function('Dummy', dummy,
returns={'out0': str},
args={'in0': str})
dispatcher.register_function('Echo', echo)
if '--local' in sys.argv:
wsdl=dispatcher.wsdl()
print wsdl
# Commented because path is platform dependent
# It looks like it doesn't matter.
# open("C:/test.wsdl","w").write(wsdl)
for method, doc in dispatcher.list_methods():
request, response, doc = dispatcher.help(method)
##print request
##print response
if '--serve' in sys.argv:
print "Starting server..."
httpd = HTTPServer(("", 8008), SOAPHandler)
httpd.dispatcher = dispatcher
httpd.serve_forever()
if '--consume' in sys.argv:
from client import SoapClient
client = SoapClient(
location = "http://localhost:8008/",
action = 'http://localhost:8008/', # SOAPAction
namespace = "http://example.com/sample.wsdl",
soap_ns='soap',
trace = True,
ns = False)
response = client.Adder(p={'a':1,'b':2},dt='20100724',c=[{'d':'1.20'},{'d':'2.01'}])
result = response.AddResult
print int(result.ab)
print str(result.dd)
| Python |
#!/usr/bin/env python
# -*- coding: latin-1 -*-
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation; either version 3, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
"Pythonic simple SOAP Client implementation"
__author__ = "Mariano Reingart (reingart@gmail.com)"
__copyright__ = "Copyright (C) 2008 Mariano Reingart"
__license__ = "LGPL 3.0"
__version__ = "1.07a"
TIMEOUT = 60
import cPickle as pickle
import hashlib
import logging
import os
import tempfile
import urllib2
from urlparse import urlsplit
from simplexml import SimpleXMLElement, TYPE_MAP, REVERSE_TYPE_MAP, OrderedDict
from transport import get_http_wrapper, set_http_wrapper, get_Http
log = logging.getLogger(__name__)
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.WARNING)
class SoapFault(RuntimeError):
def __init__(self,faultcode,faultstring):
self.faultcode = faultcode
self.faultstring = faultstring
RuntimeError.__init__(self, faultcode, faultstring)
def __str__(self):
return self.__unicode__().encode("ascii", "ignore")
def __unicode__(self):
return u'%s: %s' % (self.faultcode, self.faultstring)
def __repr__(self):
return u"SoapFault(%s, %s)" % (repr(self.faultcode),
repr(self.faultstring))
# soap protocol specification & namespace
soap_namespaces = dict(
soap11="http://schemas.xmlsoap.org/soap/envelope/",
soap="http://schemas.xmlsoap.org/soap/envelope/",
soapenv="http://schemas.xmlsoap.org/soap/envelope/",
soap12="http://www.w3.org/2003/05/soap-env",
)
_USE_GLOBAL_DEFAULT = object()
class SoapClient(object):
"Simple SOAP Client (simil PHP)"
def __init__(self, location = None, action = None, namespace = None,
cert = None, trace = False, exceptions = True, proxy = None, ns=False,
soap_ns=None, wsdl = None, cache = False, cacert=None,
sessions=False, soap_server=None, timeout=_USE_GLOBAL_DEFAULT,
http_headers={}
):
"""
:param http_headers: Additional HTTP Headers; example: {'Host': 'ipsec.example.com'}
"""
self.certssl = cert
self.keyssl = None
self.location = location # server location (url)
self.action = action # SOAP base action
self.namespace = namespace # message
self.trace = trace # show debug messages
self.exceptions = exceptions # raise exceptions? (SOAP Faults)
self.xml_request = self.xml_response = ''
self.http_headers = http_headers
if not soap_ns and not ns:
self.__soap_ns = 'soap' # default prefix (SOAP 1.1 envelope)
elif not soap_ns and ns:
self.__soap_ns = 'soapenv' # alternate prefix (same SOAP 1.1 envelope)
else:
self.__soap_ns = soap_ns
# SOAP Server (special cases like oracle or jbossas6)
self.__soap_server = soap_server
# SOAP Header support
self.__headers = {} # general headers
self.__call_headers = None # OrderedDict to be marshalled for RPC Call
# check if the Certification Authority Cert is a string and store it
if cacert and cacert.startswith("-----BEGIN CERTIFICATE-----"):
fd, filename = tempfile.mkstemp()
f = os.fdopen(fd, 'w+b', -1)
if self.trace: log.info(u"Saving CA certificate to %s" % filename)
f.write(cacert)
cacert = filename
f.close()
self.cacert = cacert
if timeout is _USE_GLOBAL_DEFAULT:
    timeout = TIMEOUT
# Create HTTP wrapper
Http = get_Http()
self.http = Http(timeout=timeout, cacert=cacert, proxy=proxy, sessions=sessions)
self.__ns = ns # namespace prefix or False to not use it
if not ns:
self.__xml = """<?xml version="1.0" encoding="UTF-8"?>
<%(soap_ns)s:Envelope xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns:xsd="http://www.w3.org/2001/XMLSchema"
xmlns:%(soap_ns)s="%(soap_uri)s">
<%(soap_ns)s:Header/>
<%(soap_ns)s:Body>
<%(method)s xmlns="%(namespace)s">
</%(method)s>
</%(soap_ns)s:Body>
</%(soap_ns)s:Envelope>"""
else:
self.__xml = """<?xml version="1.0" encoding="UTF-8"?>
<%(soap_ns)s:Envelope xmlns:%(soap_ns)s="%(soap_uri)s" xmlns:%(ns)s="%(namespace)s">
<%(soap_ns)s:Header/>
<%(soap_ns)s:Body>
<%(ns)s:%(method)s>
</%(ns)s:%(method)s>
</%(soap_ns)s:Body>
</%(soap_ns)s:Envelope>"""
# parse wsdl url
self.services = wsdl and self.wsdl_parse(wsdl, debug=trace, cache=cache)
self.service_port = None # service port for late binding
def __getattr__(self, attr):
"Return a pseudo-method that can be called"
if not self.services: # not using WSDL?
return lambda self=self, *args, **kwargs: self.call(attr,*args,**kwargs)
else: # using WSDL:
return lambda *args, **kwargs: self.wsdl_call(attr,*args,**kwargs)
def call(self, method, *args, **kwargs):
"""Prepare xml request and make SOAP call, returning a SimpleXMLElement.
If a keyword argument called "headers" is passed with a value of a
SimpleXMLElement object, then these headers will be inserted into the
request.
"""
#TODO: method != input_message
# Basic SOAP request:
xml = self.__xml % dict(method=method, namespace=self.namespace, ns=self.__ns,
soap_ns=self.__soap_ns, soap_uri=soap_namespaces[self.__soap_ns])
request = SimpleXMLElement(xml,namespace=self.__ns and self.namespace, prefix=self.__ns)
try:
request_headers = kwargs.pop('headers')
except KeyError:
request_headers = None
# serialize parameters
if kwargs:
parameters = kwargs.items()
else:
parameters = args
if parameters and isinstance(parameters[0], SimpleXMLElement):
# merge xmlelement parameter ("raw" - already marshalled)
if parameters[0].children() is not None:
for param in parameters[0].children():
getattr(request,method).import_node(param)
elif parameters:
# marshall parameters:
for k,v in parameters: # dict: tag=valor
getattr(request,method).marshall(k,v)
elif self.__soap_server not in ('oracle', ):
# JBossAS-6 requires no empty method parameters!
delattr(request("Body", ns=soap_namespaces.values(),), method)
# construct header and parameters (if not wsdl given) except wsse
if self.__headers and not self.services:
self.__call_headers = dict([(k, v) for k, v in self.__headers.items()
if not k.startswith("wsse:")])
# always extract WS Security header and send it
if 'wsse:Security' in self.__headers:
#TODO: namespaces too hardwired, clean-up...
header = request('Header' , ns=soap_namespaces.values(),)
k = 'wsse:Security'
v = self.__headers[k]
header.marshall(k, v, ns=False, add_children_ns=False)
header(k)['xmlns:wsse'] = 'http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-secext-1.0.xsd'
#<wsse:UsernameToken xmlns:wsu='http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-utility-1.0.xsd'>
if self.__call_headers:
header = request('Header' , ns=soap_namespaces.values(),)
for k, v in self.__call_headers.items():
##if not self.__ns:
## header['xmlns']
header.marshall(k, v, ns=self.__ns, add_children_ns=False)
if request_headers:
header = request('Header' , ns=soap_namespaces.values(),)
for subheader in request_headers.children():
header.import_node(subheader)
self.xml_request = request.as_xml()
self.xml_response = self.send(method, self.xml_request)
response = SimpleXMLElement(self.xml_response, namespace=self.namespace)
if self.exceptions and response("Fault", ns=soap_namespaces.values(), error=False):
raise SoapFault(unicode(response.faultcode), unicode(response.faultstring))
return response
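# Sketch of the "headers" keyword described above (hypothetical content):
# >>> hdr = SimpleXMLElement("<Headers><AuthToken>secret</AuthToken></Headers>")
# >>> client.SomeMethod(param=1, headers=hdr)  # hdr children land in soap:Header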
def send(self, method, xml):
"Send SOAP request using HTTP"
if self.location == 'test': return
# location = "%s" % self.location #?op=%s" % (self.location, method)
location = self.location
if self.services:
soap_action = self.action
else:
soap_action = self.action + method
headers={
'Content-type': 'text/xml; charset="UTF-8"',
'Content-length': str(len(xml)),
"SOAPAction": "\"%s\"" % (soap_action)
}
headers.update(self.http_headers)
log.info("POST %s" % location)
log.info("Headers: %s" % headers)
if self.trace:
print "-"*80
print "POST %s" % location
print '\n'.join(["%s: %s" % (k,v) for k,v in headers.items()])
print u"\n%s" % xml.decode("utf8","ignore")
response, content = self.http.request(
location, "POST", body=xml, headers=headers)
self.response = response
self.content = content
if self.trace:
print
print '\n'.join(["%s: %s" % (k,v) for k,v in response.items()])
print content#.decode("utf8","ignore")
print "="*80
return content
def get_operation(self, method):
# try to find operation in wsdl file
soap_ver = self.__soap_ns == 'soap12' and 'soap12' or 'soap11'
if not self.service_port:
for service_name, service in self.services.items():
for port_name, port in service['ports'].items():
if port['soap_ver'] == soap_ver:
self.service_port = service_name, port_name
break
else:
raise RuntimeError("Cannot determine service in WSDL: "
"SOAP version: %s" % soap_ver)
else:
port = self.services[self.service_port[0]]['ports'][self.service_port[1]]
self.location = port['location']
operation = port['operations'].get(unicode(method))
if not operation:
raise RuntimeError("Operation %s not found in WSDL: "
"Service/Port Type: %s" %
(method, self.service_port))
return operation
def wsdl_call(self, method, *args, **kwargs):
"Pre and post process SOAP call, input and output parameters using WSDL"
soap_uri = soap_namespaces[self.__soap_ns]
operation = self.get_operation(method)
# get i/o type declarations:
input = operation['input']
output = operation['output']
header = operation.get('header')
if 'action' in operation:
self.action = operation['action']
# sort parameters (same order as xsd:sequence)
def sort_dict(od, d):
if isinstance(od, dict):
ret = OrderedDict()
for k in od.keys():
v = d.get(k)
# don't append null tags!
if v is not None:
if isinstance(v, dict):
v = sort_dict(od[k], v)
elif isinstance(v, list):
v = [sort_dict(od[k][0], v1)
for v1 in v]
ret[str(k)] = v
return ret
else:
return d
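# sort_dict() reorders the given parameters to match the xsd:sequence order
# declared in the WSDL, dropping None values and undeclared keys (sketch):
# >>> od = OrderedDict(); od['a'] = int; od['b'] = str   # declared order
# >>> sort_dict(od, {'b': 'x', 'a': 1}).items()
# [('a', 1), ('b', 'x')]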
# construct header and parameters
if header:
self.__call_headers = sort_dict(header, self.__headers)
if input and args:
# convert positional parameters to named parameters:
d = [(k, arg) for k, arg in zip(input.values()[0].keys(), args)]
kwargs.update(dict(d))
if input and kwargs:
params = sort_dict(input.values()[0], kwargs).items()
if self.__soap_server == "axis":
# use the operation name
method = method
else:
# use the message (element) name
method = input.keys()[0]
#elif not input:
#TODO: no message! (see wsmtxca.dummy)
else:
params = kwargs and kwargs.items()
# call remote procedure
response = self.call(method, *params)
# parse results:
resp = response('Body',ns=soap_uri).children().unmarshall(output)
return resp and resp.values()[0] # pass Response tag children
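# End-to-end sketch for the WSDL path (hypothetical service and operation):
# >>> client = SoapClient(wsdl="http://example.com/service?wsdl")
# >>> result = client.Add(a=1, b=2)  # sorted, marshalled, sent, unmarshalled
# The returned value is the already-unmarshalled content of the Response tag.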
def help(self, method):
"Return operation documentation and invocation/returned value example"
operation = self.get_operation(method)
input = operation.get('input')
input = input and input.values() and input.values()[0]
if isinstance(input, dict):
input = ", ".join("%s=%s" % (k,repr(v)) for k,v
in input.items())
elif isinstance(input, list):
input = repr(input)
output = operation.get('output')
if output:
output = operation['output'].values()[0]
headers = operation.get('headers') or None
return u"%s(%s)\n -> %s:\n\n%s\nHeaders: %s" % (
method,
input or "",
output and output or "",
operation.get("documentation",""),
headers,
)
def wsdl_parse(self, url, debug=False, cache=False):
"Parse Web Service Description v1.1"
log.debug("wsdl url: %s" % url)
# Try to load a previously parsed wsdl:
force_download = False
if cache:
# make md5 hash of the url for caching...
filename_pkl = "%s.pkl" % hashlib.md5(url).hexdigest()
if isinstance(cache, basestring):
filename_pkl = os.path.join(cache, filename_pkl)
if os.path.exists(filename_pkl):
log.debug("Unpickle file %s" % (filename_pkl, ))
f = open(filename_pkl, "r")
pkl = pickle.load(f)
f.close()
# sanity check:
if pkl['version'][:-1] != __version__.split(" ")[0][:-1] or pkl['url'] != url:
import warnings
warnings.warn('version or url mismatch! discarding cached wsdl', RuntimeWarning)
if debug:
log.debug('Version: %s %s' % (pkl['version'], __version__))
log.debug('URL: %s %s' % (pkl['url'], url))
force_download = True
else:
self.namespace = pkl['namespace']
self.documentation = pkl['documentation']
return pkl['services']
soap_ns = {
"http://schemas.xmlsoap.org/wsdl/soap/": 'soap11',
"http://schemas.xmlsoap.org/wsdl/soap12/": 'soap12',
}
wsdl_uri="http://schemas.xmlsoap.org/wsdl/"
xsd_uri="http://www.w3.org/2001/XMLSchema"
xsi_uri="http://www.w3.org/2001/XMLSchema-instance"
get_local_name = lambda s: s and str((':' in s) and s.split(':')[1] or s)
get_namespace_prefix = lambda s: s and str((':' in s) and s.split(':')[0] or None)
# always return a unicode object:
REVERSE_TYPE_MAP[u'string'] = unicode
def fetch(url):
"Download a document from a URL, save it locally if cache enabled"
# check / append a valid schema if not given:
url_scheme, netloc, path, query, fragment = urlsplit(url)
if not url_scheme in ('http','https', 'file'):
for scheme in ('http','https', 'file'):
try:
if not url.startswith("/") and scheme in ('http', 'https'):
tmp_url = "%s://%s" % (scheme, url)
else:
tmp_url = "%s:%s" % (scheme, url)
if debug: log.debug("Scheme not found, trying %s" % scheme)
return fetch(tmp_url)
except Exception, e:
log.error(e)
raise RuntimeError("No scheme given for url: %s" % url)
# make md5 hash of the url for caching...
filename = "%s.xml" % hashlib.md5(url).hexdigest()
if isinstance(cache, basestring):
filename = os.path.join(cache, filename)
if cache and os.path.exists(filename) and not force_download:
log.info("Reading file %s" % (filename, ))
f = open(filename, "r")
xml = f.read()
f.close()
else:
if url_scheme == 'file':
log.info("Fetching url %s using urllib2" % (url, ))
f = urllib2.urlopen(url)
xml = f.read()
else:
log.info("GET %s using %s" % (url, self.http._wrapper_version))
response, xml = self.http.request(url, "GET", None, {})
if cache:
log.info("Writing file %s" % (filename, ))
if not os.path.isdir(cache):
os.makedirs(cache)
f = open(filename, "w")
f.write(xml)
f.close()
return xml
# Open uri and read xml:
xml = fetch(url)
# Parse WSDL XML:
wsdl = SimpleXMLElement(xml, namespace=wsdl_uri)
# detect soap prefix and uri (xmlns attributes of <definitions>)
xsd_ns = None
soap_uris = {}
for k, v in wsdl[:]:
if v in soap_ns and k.startswith("xmlns:"):
soap_uris[get_local_name(k)] = v
if v== xsd_uri and k.startswith("xmlns:"):
xsd_ns = get_local_name(k)
# Extract useful data:
self.namespace = wsdl['targetNamespace']
self.documentation = unicode(wsdl('documentation', error=False) or '')
services = {}
bindings = {} # binding_name: binding
operations = {} # operation_name: operation
port_type_bindings = {} # port_type_name: binding
messages = {} # message: element
elements = {} # element: type def
for service in wsdl.service:
service_name=service['name']
if not service_name:
continue # empty service?
if debug: log.debug("Processing service %s" % service_name)
serv = services.setdefault(service_name, {'ports': {}})
serv['documentation']=service['documentation'] or ''
for port in service.port:
binding_name = get_local_name(port['binding'])
address = port('address', ns=soap_uris.values(), error=False)
location = address and address['location'] or None
soap_uri = address and soap_uris.get(address.get_prefix())
soap_ver = soap_uri and soap_ns.get(soap_uri)
bindings[binding_name] = {'service_name': service_name,
'location': location,
'soap_uri': soap_uri, 'soap_ver': soap_ver,
}
serv['ports'][port['name']] = bindings[binding_name]
for binding in wsdl.binding:
binding_name = binding['name']
if debug: log.debug("Processing binding %s" % service_name)
soap_binding = binding('binding', ns=soap_uris.values(), error=False)
transport = soap_binding and soap_binding['transport'] or None
port_type_name = get_local_name(binding['type'])
bindings[binding_name].update({
'port_type_name': port_type_name,
'transport': transport, 'operations': {},
})
port_type_bindings[port_type_name] = bindings[binding_name]
for operation in binding.operation:
op_name = operation['name']
op = operation('operation',ns=soap_uris.values(), error=False)
action = op and op['soapAction']
d = operations.setdefault(op_name, {})
bindings[binding_name]['operations'][op_name] = d
d.update({'name': op_name})
d['parts'] = {}
# input and/or output may be absent!
input = operation('input', error=False)
body = input and input('body', ns=soap_uris.values(), error=False)
d['parts']['input_body'] = body and body['parts'] or None
output = operation('output', error=False)
body = output and output('body', ns=soap_uris.values(), error=False)
d['parts']['output_body'] = body and body['parts'] or None
header = input and input('header', ns=soap_uris.values(), error=False)
d['parts']['input_header'] = header and {'message': header['message'], 'part': header['part']} or None
headers = output and output('header', ns=soap_uris.values(), error=False)
d['parts']['output_header'] = headers and {'message': headers['message'], 'part': headers['part']} or None
#TODO: separate operation_binding from operation
if action:
d["action"] = action
def make_key(element_name, element_type):
"return a suitable key for elements"
# only distinguish 'element' vs other types
if element_type in ('complexType', 'simpleType'):
eltype = 'complexType'
else:
eltype = element_type
if eltype not in ('element', 'complexType', 'simpleType'):
raise RuntimeError("Unknown element type %s = %s" % (unicode(element_name), eltype))
return (unicode(element_name), eltype)
#TODO: cleanup element/schema/types parsing:
def process_element(element_name, node, element_type):
"Parse and define simple element types"
if debug:
log.debug("Processing element %s %s" % (element_name, element_type))
for tag in node:
if tag.get_local_name() in ("annotation", "documentation"):
continue
elif tag.get_local_name() in ('element', 'restriction'):
if debug: log.debug("%s has not children! %s" % (element_name,tag))
children = tag # element "alias"?
alias = True
elif tag.children():
children = tag.children()
alias = False
else:
if debug: log.debug("%s has not children! %s" % (element_name,tag))
continue #TODO: abstract?
d = OrderedDict()
for e in children:
t = e['type']
if not t:
t = e['base'] # complexContent (extension)!
if not t:
t = 'anyType' # no type given!
t = t.split(":")
if len(t)>1:
ns, type_name = t
else:
ns, type_name = None, t[0]
if element_name == type_name:
pass ## warning: possible infinite recursion
uri = ns and e.get_namespace_uri(ns) or xsd_uri
if uri==xsd_uri:
# look for the type, None == any
fn = REVERSE_TYPE_MAP.get(unicode(type_name), None)
else:
fn = None
if not fn:
# simple / complex type, postprocess later
fn = elements.setdefault(make_key(type_name, "complexType"), OrderedDict())
if e['name'] is not None and not alias:
e_name = unicode(e['name'])
d[e_name] = fn
else:
if debug: log.debug("complexConent/simpleType/element %s = %s" % (element_name, type_name))
d[None] = fn
if e['maxOccurs']=="unbounded" or (ns == 'SOAP-ENC' and type_name == 'Array'):
# it's an array... TODO: compound arrays?
d.array = True
if e is not None and e.get_local_name() == 'extension' and e.children():
# extend base element:
process_element(element_name, e.children(), element_type)
elements.setdefault(make_key(element_name, element_type), OrderedDict()).update(d)
# check axis2 namespace at schema types attributes
self.namespace = dict(wsdl.types("schema", ns=xsd_uri)[:]).get('targetNamespace', self.namespace)
imported_schemas = {}
def preprocess_schema(schema):
"Find schema elements and complex types"
for element in schema.children() or []:
if element.get_local_name() in ('import', ):
schema_namespace = element['namespace']
schema_location = element['schemaLocation']
if schema_location is None:
if debug: log.debug("Schema location not provided for %s!" % (schema_namespace, ))
continue
if schema_location in imported_schemas:
if debug: log.debug("Schema %s already imported!" % (schema_location, ))
continue
imported_schemas[schema_location] = schema_namespace
if debug: print "Importing schema %s from %s" % (schema_namespace, schema_location)
# Open uri and read xml:
xml = fetch(schema_location)
# Parse imported XML schema (recursively):
imported_schema = SimpleXMLElement(xml, namespace=xsd_uri)
preprocess_schema(imported_schema)
element_type = element.get_local_name()
if element_type in ('element', 'complexType', "simpleType"):
element_name = unicode(element['name'])
if debug: log.debug("Parsing Element %s: %s" % (element_type, element_name))
if element.get_local_name() == 'complexType':
children = element.children()
elif element.get_local_name() == 'simpleType':
children = element("restriction", ns=xsd_uri)
elif element.get_local_name() == 'element' and element['type']:
children = element
else:
children = element.children()
if children:
children = children.children()
elif element.get_local_name() == 'element':
children = element
if children:
process_element(element_name, children, element_type)
def postprocess_element(elements):
"Fix unresolved references (elements referenced before its definition, thanks .net)"
for k,v in elements.items():
if isinstance(v, OrderedDict):
if v.array:
elements[k] = [v] # convert arrays to python lists
if v!=elements: #TODO: fix recursive elements
postprocess_element(v)
if None in v and v[None]: # extension base?
if isinstance(v[None], dict):
for i, kk in enumerate(v[None]):
# extend base -keep original order-
if v[None] is not None:
elements[k].insert(kk, v[None][kk], i)
del v[None]
else: # "alias", just replace
if debug: log.debug("Replacing %s = %s" % (k, v[None]))
elements[k] = v[None]
#break
if isinstance(v, list):
for n in v: # recurse list
postprocess_element(n)
# process current wsdl schema:
for schema in wsdl.types("schema", ns=xsd_uri):
preprocess_schema(schema)
postprocess_element(elements)
for message in wsdl.message:
if debug: log.debug("Processing message %s" % message['name'])
for part in message('part', error=False) or []:
element = {}
element_name = part['element']
if not element_name:
# some implementations (axis) use type instead
element_name = part['type']
type_ns = get_namespace_prefix(element_name)
type_uri = wsdl.get_namespace_uri(type_ns)
if type_uri == xsd_uri:
element_name = get_local_name(element_name)
fn = REVERSE_TYPE_MAP.get(unicode(element_name), None)
element = {part['name']: fn}
# emulate a true Element (complexType)
messages.setdefault((message['name'], None), {message['name']: OrderedDict()}).values()[0].update(element)
else:
element_name = get_local_name(element_name)
fn = elements.get(make_key(element_name, 'element'))
if not fn:
# some axis servers use complexType for part messages
fn = elements.get(make_key(element_name, 'complexType'))
element = {message['name']: {part['name']: fn}}
else:
element = {element_name: fn}
messages[(message['name'], part['name'])] = element
def get_message(message_name, part_name):
if part_name:
# get the specific part of the message:
return messages.get((message_name, part_name))
else:
# get the first part for the specified message:
for (message_name_key, part_name_key), message in messages.items():
if message_name_key == message_name:
return message
for port_type in wsdl.portType:
port_type_name = port_type['name']
if debug: log.debug("Processing port type %s" % port_type_name)
binding = port_type_bindings[port_type_name]
for operation in port_type.operation:
op_name = operation['name']
op = operations[op_name]
op['documentation'] = unicode(operation('documentation', error=False) or '')
if binding['soap_ver']:
#TODO: separate operation_binding from operation (non SOAP?)
if operation("input", error=False):
input_msg = get_local_name(operation.input['message'])
input_header = op['parts'].get('input_header')
if input_header:
header_msg = get_local_name(input_header.get('message'))
header_part = get_local_name(input_header.get('part'))
# warning: some implementations use a separate message!
header = get_message(header_msg or input_msg, header_part)
else:
header = None # not enough info to search for the header message:
op['input'] = get_message(input_msg, op['parts'].get('input_body'))
op['header'] = header
else:
op['input'] = None
op['header'] = None
if operation("output", error=False):
output_msg = get_local_name(operation.output['message'])
op['output'] = get_message(output_msg, op['parts'].get('output_body'))
else:
op['output'] = None
if debug:
import pprint
log.debug(pprint.pformat(services))
# Save parsed wsdl (cache)
if cache:
f = open(filename_pkl, "wb")
pkl = {
'version': __version__.split(" ")[0],
'url': url,
'namespace': self.namespace,
'documentation': self.documentation,
'services': services,
}
pickle.dump(pkl, f)
f.close()
return services
def __setitem__(self, item, value):
"Set SOAP Header value - this header will be sent for every request."
self.__headers[item] = value
def close(self):
"Finish the connection and remove temp files"
self.http.close()
if self.cacert and self.cacert.startswith(tempfile.gettempdir()):
if self.trace: log.info("removing %s" % self.cacert)
os.unlink(self.cacert)
def parse_proxy(proxy_str):
"Parses proxy address user:pass@host:port into a dict suitable for httplib2"
if isinstance(proxy_str, unicode):
proxy_str = proxy_str.encode("utf8")
proxy_dict = {}
if proxy_str is None:
return
if "@" in proxy_str:
user_pass, host_port = proxy_str.split("@")
else:
user_pass, host_port = "", proxy_str
if ":" in host_port:
host, port = host_port.split(":")
proxy_dict['proxy_host'], proxy_dict['proxy_port'] = host, int(port)
if ":" in user_pass:
proxy_dict['proxy_user'], proxy_dict['proxy_pass'] = user_pass.split(":")
return proxy_dict
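# Illustrative round-trip (hypothetical proxy address):
# >>> parse_proxy("john:doe@proxy.example.com:8080")
# {'proxy_host': 'proxy.example.com', 'proxy_port': 8080,
#  'proxy_user': 'john', 'proxy_pass': 'doe'}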
if __name__ == "__main__":
pass
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"PySimpleSOAP"
import client
import server
import simplexml
import transport | Python |
#!/usr/bin/env python
# -*- coding: latin-1 -*-
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation; either version 3, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
"Simple XML manipulation"
__author__ = "Mariano Reingart (reingart@gmail.com)"
__copyright__ = "Copyright (C) 2008/009 Mariano Reingart"
__license__ = "LGPL 3.0"
__version__ = "1.03a"
import datetime
import logging
import re
import time
import warnings
import xml.dom.minidom
from decimal import Decimal
log = logging.getLogger(__name__)
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.WARNING)
DEBUG = False
try:
_strptime = datetime.datetime.strptime
except AttributeError: # python2.4
_strptime = lambda s, fmt: datetime.datetime(*(time.strptime(s, fmt)[:6]))
# Functions to serialize/deserialize special immutable types:
def datetime_u(s):
fmt = "%Y-%m-%dT%H:%M:%S"
try:
return _strptime(s, fmt)
except ValueError:
try:
# strip utc offset
if s[-3] == ":" and s[-6] in (' ', '-', '+'):
warnings.warn('removing unsupported UTC offset', RuntimeWarning)
s = s[:-6]
# parse microseconds
try:
return _strptime(s, fmt + ".%f")
except:
return _strptime(s, fmt)
except ValueError:
# strip microseconds (not supported in this platform)
if "." in s:
warnings.warn('removing unsupported microseconds', RuntimeWarning)
s = s[:s.index(".")]
return _strptime(s, fmt)
datetime_m = lambda dt: dt.isoformat('T')
date_u = lambda s: _strptime(s[0:10], "%Y-%m-%d").date()
date_m = lambda d: d.strftime("%Y-%m-%d")
time_u = lambda s: _strptime(s, "%H:%M:%S").time()
time_m = lambda d: d.strftime("%H:%M:%S")
bool_u = lambda s: {'0':False, 'false': False, '1': True, 'true': True}[s]
bool_m = lambda s: {False: 'false', True: 'true'}[s]
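# Round-trip sketch for the (de)serializers above:
# >>> datetime_u("2011-12-07T10:20:30")
# datetime.datetime(2011, 12, 7, 10, 20, 30)
# >>> datetime_m(datetime.datetime(2011, 12, 7, 10, 20, 30))
# '2011-12-07T10:20:30'
# >>> bool_u('true'), bool_m(False)
# (True, 'false')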
# aliases:
class Alias(object):
def __init__(self, py_type, xml_type):
self.py_type, self.xml_type = py_type, xml_type
def __call__(self, value):
return self.py_type(value)
def __repr__(self):
return "<alias '%s' for '%s'>" % (self.xml_type, self.py_type)
byte = Alias(str,'byte')
short = Alias(int,'short')
double = Alias(float,'double')
integer = Alias(long,'integer')
DateTime = datetime.datetime
Date = datetime.date
Time = datetime.time
# Define conversion function (python type): xml schema type
TYPE_MAP = {
str:'string',
unicode:'string',
bool:'boolean',
short:'short',
byte:'byte',
int:'int',
long:'long',
integer:'integer',
float:'float',
double:'double',
Decimal:'decimal',
datetime.datetime:'dateTime',
datetime.date:'date',
}
TYPE_MARSHAL_FN = {
datetime.datetime:datetime_m,
datetime.date:date_m,
bool:bool_m
}
TYPE_UNMARSHAL_FN = {
datetime.datetime:datetime_u,
datetime.date:date_u,
bool:bool_u,
str:unicode,
}
REVERSE_TYPE_MAP = dict([(v,k) for k,v in TYPE_MAP.items()])
class OrderedDict(dict):
"Minimal ordered dictionary for xsd:sequences"
def __init__(self):
self.__keys = []
self.array = False
def __setitem__(self, key, value):
if key not in self.__keys:
self.__keys.append(key)
dict.__setitem__(self, key, value)
def insert(self, key, value, index=0):
if key not in self.__keys:
self.__keys.insert(index, key)
dict.__setitem__(self, key, value)
def __delitem__(self, key):
if key in self.__keys:
self.__keys.remove(key)
dict.__delitem__(self, key)
def __iter__(self):
return iter(self.__keys)
def keys(self):
return self.__keys
def items(self):
return [(key, self[key]) for key in self.__keys]
def update(self, other):
for k,v in other.items():
self[k] = v
if isinstance(other, OrderedDict):
self.array = other.array
def __str__(self):
return "*%s*" % dict.__str__(self)
def __repr__(self):
s= "*{%s}*" % ", ".join(['%s: %s' % (repr(k),repr(v)) for k,v in self.items()])
if self.array and False:
s = "[%s]" % s
return s
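# Unlike a plain dict, insertion order is preserved (needed for xsd:sequence):
# >>> od = OrderedDict(); od['z'] = 1; od['a'] = 2
# >>> od.keys()
# ['z', 'a']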
class SimpleXMLElement(object):
"Simple XML manipulation (simil PHP)"
def __init__(self, text = None, elements = None, document = None,
namespace = None, prefix=None, namespaces_map={}):
"""
:param namespaces_map: How to map our namespace prefix to that given by the client;
{prefix: received_prefix}
"""
self.__namespaces_map = namespaces_map
_rx = "|".join(namespaces_map.keys()) # {'external': 'ext', 'model': 'mod'} -> 'external|model'
self.__ns_rx = re.compile(r"^(%s):.*$" % _rx) # And now we build an expression ^(external|model):.*$
# to find prefixes in all xml nodes i.e.: <model:code>1</model:code>
# and later change that to <mod:code>1</mod:code>
self.__ns = namespace
self.__prefix = prefix
if text is not None:
try:
self.__document = xml.dom.minidom.parseString(text)
except:
log.error(text)
raise
self.__elements = [self.__document.documentElement]
else:
self.__elements = elements
self.__document = document
def add_child(self, name, text=None, ns=True):
"Adding a child tag to a node"
if not ns or not self.__ns:
log.debug('adding %s', name)
element = self.__document.createElement(name)
else:
log.debug('adding %s ns "%s" %s', name, self.__ns, ns)
if self.__prefix:
element = self.__document.createElementNS(self.__ns, "%s:%s" % (self.__prefix, name))
else:
element = self.__document.createElementNS(self.__ns, name)
# don't append null tags!
if text is not None:
if isinstance(text, unicode):
element.appendChild(self.__document.createTextNode(text))
else:
element.appendChild(self.__document.createTextNode(str(text)))
self._element.appendChild(element)
return SimpleXMLElement(
elements=[element],
document=self.__document,
namespace=self.__ns,
prefix=self.__prefix,
namespaces_map=self.__namespaces_map)
def __setattr__(self, tag, text):
"Add text child tag node (short form)"
if tag.startswith("_"):
object.__setattr__(self, tag, text)
else:
log.debug('__setattr__(%s, %s)', tag, text)
self.add_child(tag, text)
def __delattr__(self, tag):
"Remove a child tag (non recursive!)"
elements=[__element for __element in self._element.childNodes
if __element.nodeType == __element.ELEMENT_NODE
]
for element in elements:
self._element.removeChild(element)
def add_comment(self, data):
"Add an xml comment to this child"
comment = self.__document.createComment(data)
self._element.appendChild(comment)
def as_xml(self, filename=None, pretty=False):
"Return the XML representation of the document"
if not pretty:
return self.__document.toxml('UTF-8')
else:
return self.__document.toprettyxml(encoding='UTF-8')
def __repr__(self):
"Return the XML representation of this tag"
return self._element.toxml('UTF-8')
def get_name(self):
"Return the tag name of this node"
return self._element.tagName
def get_local_name(self):
"Return the tag loca name (prefix:name) of this node"
return self._element.localName
def get_prefix(self):
"Return the namespace prefix of this node"
return self._element.prefix
def get_namespace_uri(self, ns):
"Return the namespace uri for a prefix"
element = self._element
while element is not None and element.attributes is not None:
try:
return element.attributes['xmlns:%s' % ns].value
except KeyError:
element = element.parentNode
def attributes(self):
"Return a dict of attributes for this tag"
#TODO: use slice syntax [:]?
return self._element.attributes
def __getitem__(self, item):
"Return xml tag attribute value or a slice of attributes (iter)"
log.debug('__getitem__(%s)', item)
if isinstance(item, basestring):
if self._element.hasAttribute(item):
return self._element.attributes[item].value
elif isinstance(item, slice):
# return a list with name:values
return self._element.attributes.items()[item]
else:
# return element by index (position)
element = self.__elements[item]
return SimpleXMLElement(
elements=[element],
document=self.__document,
namespace=self.__ns,
prefix=self.__prefix,
namespaces_map=self.__namespaces_map)
def add_attribute(self, name, value):
"Set an attribute value from a string"
self._element.setAttribute(name, value)
def __setitem__(self, item, value):
"Set an attribute value"
if isinstance(item,basestring):
self.add_attribute(item, value)
elif isinstance(item, slice):
# set multiple attributes at once
for k, v in value.items():
self.add_attribute(k, v)
def __call__(self, tag=None, ns=None, children=False, root=False,
error=True, ):
"Search (even in child nodes) and return a child tag by name"
try:
if root:
# return entire document
return SimpleXMLElement(
elements=[self.__document.documentElement],
document=self.__document,
namespace=self.__ns,
prefix=self.__prefix,
namespaces_map=self.__namespaces_map
)
if tag is None:
# if no name given, iterate over siblings (same level)
return self.__iter__()
if children:
# future: filter children? by ns?
return self.children()
elements = None
if isinstance(tag, int):
# return tag by index
elements=[self.__elements[tag]]
if ns and not elements:
for ns_uri in isinstance(ns, (tuple, list)) and ns or (ns, ):
log.debug('searching %s by ns=%s', tag, ns_uri)
elements = self._element.getElementsByTagNameNS(ns_uri, tag)
if elements:
break
if self.__ns and not elements:
log.debug('searching %s by ns=%s', tag, self.__ns)
elements = self._element.getElementsByTagNameNS(self.__ns, tag)
if not elements:
log.debug('searching %s', tag)
elements = self._element.getElementsByTagName(tag)
if not elements:
#log.debug(self._element.toxml())
if error:
raise AttributeError(u"No elements found")
else:
return
return SimpleXMLElement(
elements=elements,
document=self.__document,
namespace=self.__ns,
prefix=self.__prefix,
namespaces_map=self.__namespaces_map)
except AttributeError, e:
raise AttributeError(u"Tag not found: %s (%s)" % (tag, unicode(e)))
def __getattr__(self, tag):
"Shortcut for __call__"
return self.__call__(tag)
def __iter__(self):
"Iterate over xml tags at this level"
for __element in self.__elements:
    yield SimpleXMLElement(
        elements=[__element],
        document=self.__document,
        namespace=self.__ns,
        prefix=self.__prefix,
        namespaces_map=self.__namespaces_map)
def __dir__(self):
"List xml children tags names"
return [node.tagName for node
in self._element.childNodes
if node.nodeType != node.TEXT_NODE]
def children(self):
"Return xml children tags element"
elements=[__element for __element in self._element.childNodes
if __element.nodeType == __element.ELEMENT_NODE]
if not elements:
return None
#raise IndexError("Tag %s has no children" % self._element.tagName)
return SimpleXMLElement(
elements=elements,
document=self.__document,
namespace=self.__ns,
prefix=self.__prefix,
namespaces_map=self.__namespaces_map)
def __len__(self):
"Return elements count"
return len(self.__elements)
def __contains__( self, item):
"Search for a tag name in this element or child nodes"
return self._element.getElementsByTagName(item)
def __unicode__(self):
"Returns the unicode text nodes of the current element"
if self._element.childNodes:
rc = u""
for node in self._element.childNodes:
if node.nodeType == node.TEXT_NODE:
rc = rc + node.data
return rc
return ''
def __str__(self):
"Returns the str text nodes of the current element"
return unicode(self).encode("utf8","ignore")
def __int__(self):
"Returns the integer value of the current element"
return int(self.__str__())
def __float__(self):
"Returns the float value of the current element"
try:
return float(self.__str__())
except:
raise IndexError(self._element.toxml())
_element = property(lambda self: self.__elements[0])
def unmarshall(self, types, strict=True):
"Convert to python values the current serialized xml element"
# types is a dict of {tag name: conversion function}
# strict=False to use default type conversion if not specified
# example: types={'p': {'a': int,'b': int}, 'c': [{'d':str}]}
# expected xml: <p><a>1</a><b>2</b></p><c><d>hola</d><d>chau</d></c>
# returned value: {'p': {'a':1,'b':2}, 'c':[{'d':'hola'},{'d':'chau'}]}
d = {}
for node in self():
name = str(node.get_local_name())
ref_name_type = None
# handle multirefs: href="#id0"
if 'href' in node.attributes().keys():
href = node['href'][1:]
for ref_node in self(root=True)("multiRef"):
if ref_node['id'] == href:
node = ref_node
ref_name_type = ref_node['xsi:type'].split(":")[1]
break
try:
fn = types[name]
except (KeyError, ), e:
if node.get_namespace_uri("soapenc"):
fn = None # ignore multirefs!
elif 'xsi:type' in node.attributes().keys():
xsd_type = node['xsi:type'].split(":")[1]
fn = REVERSE_TYPE_MAP[xsd_type]
elif strict:
raise TypeError(u"Tag: %s invalid (type not found)" % (name,))
else:
# if not strict, use default type conversion
fn = unicode
if isinstance(fn, list):
# append to existing list (if any) - unnested dict arrays -
value = d.setdefault(name, [])
children = node.children()
for child in (children and children() or []): # Readability counts
value.append(child.unmarshall(fn[0], strict))
elif isinstance(fn, tuple):
value = []
_d = {}
children = node.children()
as_dict = len(fn) == 1 and isinstance(fn[0], dict)
for child in (children and children() or []): # Readability counts
if as_dict:
_d.update(child.unmarshall(fn[0], strict)) # Merging pairs
else:
value.append(child.unmarshall(fn[0], strict))
if as_dict:
value.append(_d)
if name in d:
_tmp = list(d[name])
_tmp.extend(value)
value = tuple(_tmp)
else:
value = tuple(value)
elif isinstance(fn, dict):
##if ref_name_type is not None:
## fn = fn[ref_name_type]
children = node.children()
value = children and children.unmarshall(fn, strict)
else:
if fn is None: # xsd:anyType not unmarshalled
value = node
elif str(node) or fn == str:
try:
# get special deserialization function (if any)
fn = TYPE_UNMARSHAL_FN.get(fn,fn)
if fn == str:
# always return a unicode object:
value = unicode(node)
else:
value = fn(unicode(node))
except (ValueError, TypeError), e:
raise ValueError(u"Tag: %s: %s" % (name, unicode(e)))
else:
value = None
d[name] = value
return d
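# Worked example matching the comments above (illustrative):
# >>> x = SimpleXMLElement('<r><p><a>1</a><b>2</b></p><c><d>hola</d><d>chau</d></c></r>')
# >>> x.children().unmarshall({'p': {'a': int, 'b': int}, 'c': [{'d': str}]})
# {'p': {'a': 1, 'b': 2}, 'c': [{'d': u'hola'}, {'d': u'chau'}]}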
def _update_ns(self, name):
"""Replace the defined namespace alias with tohse used by the client."""
pref = self.__ns_rx.search(name)
if pref:
pref = pref.groups()[0]
try:
name = name.replace(pref, self.__namespaces_map[pref])
except KeyError:
log.warning('Unknown namespace alias %s' % name)
return name
def marshall(self, name, value, add_child=True, add_comments=False,
ns=False, add_children_ns=True):
"Analize python value and add the serialized XML element using tag name"
# Change node name to that used by a client
name = self._update_ns(name)
if isinstance(value, dict): # serialize dict (<key>value</key>)
child = add_child and self.add_child(name, ns=ns) or self
for k,v in value.items():
if not add_children_ns:
ns = False
child.marshall(k, v, add_comments=add_comments, ns=ns)
elif isinstance(value, tuple): # serialize tuple (<key>value</key>)
child = add_child and self.add_child(name, ns=ns) or self
if not add_children_ns:
ns = False
for k,v in value:
getattr(self, name).marshall(k, v, add_comments=add_comments, ns=ns)
elif isinstance(value, list): # serialize lists
child=self.add_child(name, ns=ns)
if not add_children_ns:
ns = False
if add_comments:
child.add_comment("Repetitive array of:")
for t in value:
child.marshall(name, t, False, add_comments=add_comments, ns=ns)
elif isinstance(value, basestring): # do not convert strings or unicodes
self.add_child(name, value,ns=ns)
elif value is None: # send an empty tag?
self.add_child(name, ns=ns)
elif value in TYPE_MAP.keys():
# add commented placeholders for simple types (for examples/help only)
child = self.add_child(name, ns=ns)
child.add_comment(TYPE_MAP[value])
else: # the rest of object types are converted to string
# get special serialization function (if any)
fn = TYPE_MARSHAL_FN.get(type(value), str)
self.add_child(name, fn(value), ns=ns)
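# marshall() is the serializing counterpart of unmarshall() (illustrative;
# child order inside a plain dict follows python's dict iteration order):
# >>> x = SimpleXMLElement('<r/>')
# >>> x.marshall('p', {'a': 1, 'b': 2})
# >>> x.as_xml()
# '<?xml version="1.0" encoding="UTF-8"?><r><p><a>1</a><b>2</b></p></r>'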
def import_node(self, other):
x = self.__document.importNode(other._element, True) # deep copy
self._element.appendChild(x)
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# created by Massimo Di Pierro
# recreated by Vladyslav Kozlovskyy
# license MIT/BSD/GPL
import re
from cgi import escape
from string import maketrans
"""
TODO: next version should use MathJax
<script type="text/javascript" src="http://cdn.mathjax.org/mathjax/latest/MathJax.js">
MathJax.Hub.Config({
extensions: ["tex2jax.js","TeX/AMSmath.js","TeX/AMSsymbols.js"],
jax: ["input/TeX", "output/HTML-CSS"],
tex2jax: {
inlineMath: [ ['$','$'], ["\\(","\\)"] ],
displayMath: [ ['$$','$$'], ["\\[","\\]"] ],
},
"HTML-CSS": { availableFonts: ["TeX"] }
});
</script>
"""
__all__ = ['render', 'markmin2html', 'markmin_escape']
__doc__ = """
# Markmin markup language
## About
This is a new markup language that we call markmin, designed to produce high quality scientific papers and books and also put them online. We provide serializers for html, latex and pdf. It is implemented in the ``markmin2html`` function in the ``markmin2html.py`` module.
Example of usage:
``
m = "Hello **world** [[link http://web2py.com]]"
from markmin2html import markmin2html
print markmin2html(m)
from markmin2latex import markmin2latex
print markmin2latex(m)
from markmin2pdf import markmin2pdf # requires pdflatex
print markmin2pdf(m)
``
====================
# This is a test block
with new features:
This is a blockquote with
a list with tables in it:
-----------
This is a paragraph before a list.
You can continue paragraph on the
next lines.
This is an ordered list with tables:
+ Item 1
+ Item 2
+ --------
aa|bb|cc
11|22|33
--------:tableclass1[tableid1]
+ Item 4
-----------
T1| T2| t3
===========
aaa|bbb|ccc
ddd|fff|ggg
123|0 |5.0
-----------:tableclass1
-----------:blockquoteclass[blockquoteid]
This is a new paragraph
followed by a table.
The table has a header, footer, sections,
and odd and even rows:
-------------------------------
**Title 1**|**Title 2**|**Title 3**
==============================
data 1 | data 2 | 2.00
data 3 |data4(long)| 23.00
|data 5 | 33.50
==============================
New section|New data | 5.00
data 1 |data2(long)|100.45
|data 3 | 12.50
data 4 | data 5 | .33
data 6 |data7(long)| 8.01
|data 8 | 514
==============================
Total: | 9 items |698,79
------------------------------:tableclass1[tableid2]
## Multilevel
lists
Now lists can be multilevel:
+ Ordered item 1 on level 1.
You can continue the item text on
the following lines
. paragraph in an item
++. Ordered item 1 of sublevel 2 with
a paragraph (paragraph can start
with point after plus or minus
characters, e.g. **++.** or **--.**)
++. This is another item. But with 3 paragraphs,
blockquote and sublists:
.. This is the second paragraph in the item. You
can add paragraphs to an item using point
notation, where the first characters of the line
are a sequence of points, followed by a space
and the paragraph text. For example, this
paragraph (in sublevel 2) starts with two points:
``.. This is the second paragraph...``
.. ----------
### this is a blockquote in a list
You can use blockquote with headers, paragraphs,
tables and lists in it:
Tables may or may not have a header and footer.
This table is defined without any header
or footer:
---------------------
red |fox | 0
blue |dolphin | 1000
green|leaf | 10000
---------------------
----------
.. This is yet another paragraph in the item.
--- This is an item of unordered list **(sublevel 3)**
--- This is the second item of the unordered list ''(sublevel 3)''
++++++ This is a single item of ordered list in sublevel 6
.... and this is a paragraph in sublevel 4
---. This is a new item with paragraph in sublevel 3.
++++ Start ordered list in sublevel 4 with code block: ``
line 1
line 2
line 3
``
++++. Yet another item with code block (we need to indent \`\` to add code block as part of item):
``
line 1
line 2
line 3
``
This item finishes with this paragraph.
... Item in sublevel 3 can be continued with paragraphs.
... ``
this is another
code block
in the
sublevel 3 item
``
+++ The last item in sublevel 3
.. This is a continuous paragraph for item 2 in sublevel 2.
You can use such structures to create complex
structured documents.
++ item 3 in sublevel 2
-- item 1 in sublevel 2 (new unordered list)
-- item 2 in sublevel 2
-- item 3 in sublevel 2
++ item 1 in sublevel 2 (new ordered list)
++ item 2 in sublevel 2
++ item 3 in sublevel 2
+ item 2 in level 1
+ item 3 in level 1
- new unordered list (item 1 in level 1)
- level 2 in level 1
- level 3 in level 1
- level 4 in level 1
## This is the last section of the test
A single paragraph with '----' in it will be turned into a separator:
-----------
And this is the last paragraph in
the test. Be happy!
====================
## Why?
We wanted a markup language with the following requirements:
- less than 300 lines of functional code
- easy to read
- secure
- support table, ul, ol, code
- support html5 video and audio elements (html serialization only)
- can align images and resize them
- can specify class for tables, blockquotes and code elements
- can add anchors
- does not use _ for markup (since it creates odd behavior)
- automatically links urls
- fast
- easy to extend
- supports latex and pdf including references
- allows describing the markup in the markup itself (this document is generated from markmin syntax)
(results depend on the text, but on average markmin is 30% faster than markdown for ~100K texts and 10x faster for ~10K texts)
The [[web2py book http://www.lulu.com/product/paperback/web2py-%283rd-edition%29/12822827]] published by Lulu, for example, was entirely generated with markmin2pdf from the online [[web2py wiki http://www.web2py.com/book]].
## Download
- http://web2py.googlecode.com/hg/gluon/contrib/markmin/markmin2html.py
- http://web2py.googlecode.com/hg/gluon/contrib/markmin/markmin2latex.py
- http://web2py.googlecode.com/hg/gluon/contrib/markmin/markmin2pdf.py
markmin2html.py and markmin2latex.py are single files with no web2py dependency. Their license is BSD.
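Both can also be used as libraries. For example (a minimal sketch, assuming markmin2html.py is on your Python path):
``
from markmin2html import markmin2html
print markmin2html("**hello** ''world''")
``:python
This prints ``<p><strong>hello</strong> <em>world</em></p>``.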
## Examples
### Bold, italic, code and links
------------------------------------------------------------------------------
**SOURCE** | **OUTPUT**
==============================================================================
``# title`` | **title**
``## section`` | **section**
``### subsection`` | **subsection**
``**bold**`` | **bold**
``''italic''`` | ''italic''
``~~strikeout~~`` | ~~strikeout~~
``!`!`verbatim`!`!`` | ``verbatim``
``\`\`color with **bold**\`\`:red`` | ``color with **bold**``:red
``\`\`many colors\`\`:color[blue:#ffff00]`` | ``many colors``:color[blue:#ffff00]
``http://google.com`` | http://google.com
``[[**click** me #myanchor]]`` | [[**click** me #myanchor]]
``[[click me [extra info] #myanchor popup]]`` | [[click me [extra info] #myanchor popup]]
-------------------------------------------------------------------------------
### More on links
The format is always ``[[title link]]`` or ``[[title [extra] link]]``. Notice you can nest bold, italic, strikeout and code inside the link ``title``.
### Anchors [[myanchor]]
You can place an anchor anywhere in the text using the syntax ``[[name]]`` where ''name'' is the name of the anchor.
You can then link the anchor with [[link #myanchor]], i.e. ``[[link #myanchor]]`` or [[link with an extra info [extra info] #myanchor]], i.e.
``[[link with an extra info [extra info] #myanchor]]``.
### Images
[[alt-string for the image [the image title] http://www.web2py.com/examples/static/web2py_logo.png right 200px]]
This paragraph has an image aligned to the right with a width of 200px. It is placed using the code
``[[alt-string for the image [the image title] http://www.web2py.com/examples/static/web2py_logo.png right 200px]]``.
### Unordered Lists
``
- Dog
- Cat
- Mouse
``
is rendered as
- Dog
- Cat
- Mouse
Two consecutive newlines (a blank line) between items split the list into two separate lists.
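For example:
``
- item 1
- item 2

- item 3
``
produces two separate lists.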
### Ordered Lists
``
+ Dog
+ Cat
+ Mouse
``
is rendered as
+ Dog
+ Cat
+ Mouse
### Multilevel Lists
``
+ Dogs
-- red
-- brown
-- black
+ Cats
-- fluffy
-- smooth
-- bald
+ Mice
-- small
-- big
-- huge
``
is rendered as
+ Dogs
-- red
-- brown
-- black
+ Cats
-- fluffy
-- smooth
-- bald
+ Mice
-- small
-- big
-- huge
### Tables (with optional header and/or footer)
Something like this
``
-----------------
**A**|**B**|**C**
=================
0 | 0 | X
0 | X | 0
X | 0 | 0
=================
**D**|**F**|**G**
-----------------:abc[id]
``
is a table and is rendered as
-----------------
**A**|**B**|**C**
=================
0 | 0 | X
0 | X | 0
X | 0 | 0
=================
**D**|**F**|**G**
-----------------:abc[id]
Four or more dashes delimit the table and | separates the columns.
An optional ``:abc``, ``:id[abc_1]`` or ``:abc[abc_1]`` suffix on the closing line sets the class and/or id of the table.
### Blockquote
A table with a single cell is rendered as a blockquote:
-----
Hello world
-----
Blockquote can contain headers, paragraphs, lists and tables:
``
-----
This is a paragraph in a blockquote
+ item 1
+ item 2
-- item 2.1
-- item 2.2
+ item 3
---------
0 | 0 | X
0 | X | 0
X | 0 | 0
---------:tableclass1
-----
``
is rendered as:
-----
This is a paragraph in a blockquote
+ item 1
+ item 2
-- item 2.1
-- item 2.2
+ item 3
---------
0 | 0 | X
0 | X | 0
X | 0 | 0
---------:tableclass1
-----
### Code, ``<code>``, escaping and extra stuff
``
def test():
return "this is Python code"
``:python
Optionally, a ` inside a ``!`!`...`!`!`` block can be inserted by escaping it as !`!.
**NOTE:** You can escape markmin constructions (\\'\\',\`\`,\*\*,\~\~,\[,\{,\]\},\$,\@) with the '\\\\' character:
so \\\\`\\\\` can replace the !`!`! escape string.
The ``:python`` after the markup is also optional. If present, it is used by default to set the class of the <code> block.
The behavior can be overridden by passing an argument ``extra`` to the ``render`` function. For example:
``
markmin2html("!`!!`!aaa!`!!`!:custom",
extra=dict(custom=lambda text: 'x'+text+'x'))
``:python
generates
``'xaaax'``:python
(the ``!`!`...`!`!:custom`` block is rendered by the ``custom=lambda`` function passed to ``render``).
### Line breaks
The ``[[NEWLINE]]`` tag is used to break lines:
``
#### Multiline [[NEWLINE]]
title
paragraph [[NEWLINE]]
with breaks[[NEWLINE]]in it
``
generates:
#### Multiline [[NEWLINE]]
title
paragraph [[NEWLINE]]
with breaks[[NEWLINE]]in it
### Html5 support
Markmin also supports the <video> and <audio> HTML5 tags using the notation:
``
[[message link video]]
[[message link audio]]
[[message [title] link video]]
[[message [title] link audio]]
``
where ``message`` will be shown in browsers without HTML5 video/audio tag support.
### Latex and other extensions
Formulas can be embedded into HTML with ''\$\$``formula``\$\$''.
You can use Google charts to render the formula:
``
LATEX = '<img src="http://chart.apis.google.com/chart?cht=tx&chl=%s" />'
markmin2html(text,{'latex':lambda code: LATEX % code.replace('"','\\\\"')})
``
### Code with syntax highlighting
This requires a syntax highlighting tool, such as the web2py CODE helper.
``
extra={'code_cpp':lambda text: CODE(text,language='cpp').xml(),
'code_java':lambda text: CODE(text,language='java').xml(),
'code_python':lambda text: CODE(text,language='python').xml(),
'code_html':lambda text: CODE(text,language='html').xml()}
``
or simply:
``
extra={'code':lambda text,lang='python': CODE(text,language=lang).xml()}
``
``
markmin2html(text,extra=extra)
``
Code can now be marked up as in this example:
``
!`!`
<html><body>example</body></html>
!`!`:code_html
``
OR
``
!`!`
<html><body>example</body></html>
!`!`:code[html]
``
### Citations and References
Citations are treated as internal links in HTML and as proper citations in LaTeX if there is a final section called "References". Items like
``
- [[key]] value
``
in the References section will be translated into LaTeX
``
\\bibitem{key} value
``
Here is an example of usage:
``
As shown in Ref.!`!`mdipierro`!`!:cite
## References
- [[mdipierro]] web2py Manual, 3rd Edition, lulu.com
``
### Caveats
``<ul/>``, ``<ol/>``, ``<code/>``, ``<table/>``, ``<blockquote/>``, ``<h1/>``, ..., ``<h6/>`` do not have ``<p>...</p>`` around them.
"""
html_colors=['aqua', 'black', 'blue', 'fuchsia', 'gray', 'green',
'lime', 'maroon', 'navy', 'olive', 'purple', 'red',
'silver', 'teal', 'white', 'yellow']
META = '\x06'
LINK = '\x07'
DISABLED_META = '\x08'
LATEX = '<img src="http://chart.apis.google.com/chart?cht=tx&chl=%s" />'
regex_URL=re.compile(r'@/(?P<a>\w*)/(?P<c>\w*)/(?P<f>\w*(\.\w+)?)(/(?P<args>[\w\.\-/]+))?')
regex_env=re.compile(r'@\{(?P<a>[\w\-\.]+?)(\:(?P<b>.*?))?\}')
regex_expand_meta = re.compile('('+META+'|'+DISABLED_META+'|````)')
regex_dd=re.compile(r'\$\$(?P<latex>.*?)\$\$')
regex_code = re.compile('('+META+'|'+DISABLED_META+r'|````)|(``(?P<t>.+?)``(?::(?P<c>[a-zA-Z][_a-zA-Z\-\d]*)(?:\[(?P<p>[^\]]*)\])?)?)',re.S)
regex_strong=re.compile(r'\*\*(?P<t>[^\s*]+( +[^\s*]+)*)\*\*')
regex_del=re.compile(r'~~(?P<t>[^\s*]+( +[^\s*]+)*)~~')
regex_em=re.compile(r"''(?P<t>[^\s']+(?: +[^\s']+)*)''")
regex_num=re.compile(r"^\s*[+-]?((\d+(\.\d*)?)|\.\d+)([eE][+-]?[0-9]+)?\s*$")
regex_list=re.compile('^(?:(?:(#{1,6})|(?:(\.+|\++|\-+)(\.)?))\s*)?(.*)$')
regex_bq_headline=re.compile('^(?:(\.+|\++|\-+)(\.)?\s+)?(-{3}-*)$')
regex_tq=re.compile('^(-{3}-*)(?::(?P<c>[a-zA-Z][_a-zA-Z\-\d]*)(?:\[(?P<p>[a-zA-Z][_a-zA-Z\-\d]*)\])?)?$')
regex_proto = re.compile(r'(?<!["\w>/=])(?P<p>\w+):(?P<k>\w+://[\w\d\-+=?%&/:.]+)', re.M)
regex_auto = re.compile(r'(?<!["\w>/=])(?P<k>\w+://[\w\d\-+_=?%&/:.,;#]+\w)',re.M)
regex_link=re.compile(r'('+LINK+r')|\[\[(?P<s>.+?)\]\]',re.S)
regex_link_level2=re.compile(r'^(?P<t>\S.*?)?(?:\s+\[(?P<a>.+?)\])?(?:\s+(?P<k>\S+))?(?:\s+(?P<p>popup))?\s*$',re.S)
regex_media_level2=re.compile(r'^(?P<t>\S.*?)?(?:\s+\[(?P<a>.+?)\])?(?:\s+(?P<k>\S+))?\s+(?P<p>img|IMG|left|right|center|video|audio|blockleft|blockright)(?:\s+(?P<w>\d+px))?\s*$',re.S)
regex_markmin_escape = re.compile(r"(\\*)(['`:*~\\[\]{}@\$+\-.#\n])")
regex_backslash = re.compile(r"\\(['`:*~\\[\]{}@\$+\-.#\n])")
ttab_in = maketrans("'`:*~\\[]{}@$+-.#\n", '\x0b\x0c\x0e\x0f\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x05')
ttab_out = maketrans('\x0b\x0c\x0e\x0f\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x05',"'`:*~\\[]{}@$+-.#\n")
def markmin_escape(text):
""" insert \\ before markmin control characters: '`:*~[]{}@$ """
return regex_markmin_escape.sub(
lambda m: '\\'+m.group(0).replace('\\','\\\\'), text)
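# Usage sketch: markmin_escape('**bold**') returns '\*\*bold\*\*',
# so the asterisks render literally instead of as strong markup.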
def replace_autolinks(text,autolinks):
return regex_auto.sub(lambda m: autolinks(m.group('k')), text)
def replace_at_urls(text,url):
# this is experimental @{function/args}
def u1(match,url=url):
a,c,f,args = match.group('a','c','f','args')
return url(a=a or None,c=c or None,f = f or None,
args=(args or '').split('/'), scheme=True, host=True)
return regex_URL.sub(u1,text)
def replace_components(text,env):
def u2(match, env=env):
f = env.get(match.group('a'), match.group(0))
if callable(f):
try:
f = f(match.group('b'))
except Exception, e:
f = 'ERROR: %s' % e
return str(f)
return regex_env.sub(u2, text)
def autolinks_simple(url):
"""
automatically converts a url to a link,
image, video or audio tag
"""
u_url=url.lower()
if u_url.endswith(('.jpg','.jpeg','.gif','.png')):
return '<img src="%s" controls />' % url
elif u_url.endswith(('.mp4','.mpeg','.mov','.ogv')):
return '<video src="%s" controls></video>' % url
elif u_url.endswith(('.mp3','.wav','.ogg')):
return '<audio src="%s" controls></audio>' % url
return '<a href="%s">%s</a>' % (url,url)
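# Usage sketch (illustrative URLs):
#   autolinks_simple('http://example.com/photo.png')
#   # -> '<img src="http://example.com/photo.png" controls />'
#   autolinks_simple('http://example.com/')
#   # -> '<a href="http://example.com/">http://example.com/</a>'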
def protolinks_simple(proto, url):
"""
converts a url to an html string using the appropriate proto prefix;
used for constructions of the form "proto:url", e.g.:
"iframe:http://www.example.com/path" will call protolinks()
with parameters:
proto="iframe"
url="http://www.example.com/path"
"""
if proto in ('iframe','embed'): # NOTE: embed is a synonym for iframe now
return '<iframe src="%s" frameborder="0" allowfullscreen></iframe>'%url
elif proto == 'qr':
return '<img style="width:100px" src="http://chart.apis.google.com/chart?cht=qr&chs=100x100&chl=%s&choe=UTF-8&chld=H" alt="QR Code" title="QR Code" />'%url
return proto+':'+url
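# Usage sketch (illustrative URL):
#   protolinks_simple('iframe', 'http://www.example.com/path')
#   # -> '<iframe src="http://www.example.com/path" frameborder="0" allowfullscreen></iframe>'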
def render(text,
extra={},
allowed={},
sep='p',
URL=None,
environment=None,
latex='google',
autolinks='default',
protolinks='default',
class_prefix='',
id_prefix='markmin_',
pretty_print=False):
"""
Arguments:
- text is the text to be processed
- extra is a dict like extra=dict(custom=lambda value: value) that process custom code
as in " ``this is custom code``:custom "
- allowed is a dictionary of list of allowed classes like
allowed = dict(code=('python','cpp','java'))
- sep can be 'p' to separate text in <p>...</p>
or can be 'br' to separate text using <br />
- URL is an optional callable (e.g. the web2py URL helper) used to expand @/a/c/f shortcuts into urls
- environment is a dictionary of environment variables (can be accessed with @{variable})
- latex selects how $$...$$ formulas are rendered; 'google' (the default) renders them via the Google charts API
- autolinks is a function to convert auto urls to html-code (default is autolinks(url) )
- protolinks is a function to convert proto-urls (e.g."proto:url") to html-code
(default is protolinks(proto,url))
- class_prefix is a prefix for ALL classes in markmin text. E.g. if class_prefix='my_'
then for ``test``:cls class will be changed to "my_cls" (default value is '')
- id_prefix is prefix for ALL ids in markmin text (default value is 'markmin_'). E.g.:
-- [[id]] will be converted to <span class="anchor" id="markmin_id"></span>
-- [[link #id]] will be converted to <a href="#markmin_id">link</a>
-- ``test``:cls[id] will be converted to <code class="cls" id="markmin_id">test</code>
>>> render('this is\\n# a section\\n\\nparagraph')
'<p>this is</p><h1>a section</h1><p>paragraph</p>'
>>> render('this is\\n## a subsection\\n\\nparagraph')
'<p>this is</p><h2>a subsection</h2><p>paragraph</p>'
>>> render('this is\\n### a subsubsection\\n\\nparagraph')
'<p>this is</p><h3>a subsubsection</h3><p>paragraph</p>'
>>> render('**hello world**')
'<p><strong>hello world</strong></p>'
>>> render('``hello world``')
'<code>hello world</code>'
>>> render('``hello world``:python')
'<code class="python">hello world</code>'
>>> render('``\\nhello\\nworld\\n``:python')
'<pre><code class="python">hello\\nworld</code></pre>'
>>> render('``hello world``:python[test_id]')
'<code class="python" id="markmin_test_id">hello world</code>'
>>> render('``hello world``:id[test_id]')
'<code id="markmin_test_id">hello world</code>'
>>> render('``\\nhello\\nworld\\n``:python[test_id]')
'<pre><code class="python" id="markmin_test_id">hello\\nworld</code></pre>'
>>> render('``\\nhello\\nworld\\n``:id[test_id]')
'<pre><code id="markmin_test_id">hello\\nworld</code></pre>'
>>> render("''hello world''")
'<p><em>hello world</em></p>'
>>> render('** hello** **world**')
'<p>** hello** <strong>world</strong></p>'
>>> render('- this\\n- is\\n- a list\\n\\nand this\\n- is\\n- another')
'<ul><li>this</li><li>is</li><li>a list</li></ul><p>and this</p><ul><li>is</li><li>another</li></ul>'
>>> render('+ this\\n+ is\\n+ a list\\n\\nand this\\n+ is\\n+ another')
'<ol><li>this</li><li>is</li><li>a list</li></ol><p>and this</p><ol><li>is</li><li>another</li></ol>'
>>> render("----\\na | b\\nc | d\\n----\\n")
'<table><tbody><tr class="first"><td>a</td><td>b</td></tr><tr class="even"><td>c</td><td>d</td></tr></tbody></table>'
>>> render("----\\nhello world\\n----\\n")
'<blockquote>hello world</blockquote>'
>>> render('[[http://example.com]]')
'<p><span class="anchor" id="markmin_http://example.com"></span></p>'
>>> render('[[ http://example.com]]')
'<p><a href="http://example.com">http://example.com</a></p>'
>>> render('[[bookmark [http://example.com] ]]')
'<p><span class="anchor" id="markmin_bookmark"><a href="http://example.com">http://example.com</a></span></p>'
>>> render('[[this is a link http://example.com]]')
'<p><a href="http://example.com">this is a link</a></p>'
>>> render('[[this is an image http://example.com left]]')
'<p><img src="http://example.com" alt="this is an image" style="float:left" /></p>'
>>> render('[[this is an image http://example.com left 200px]]')
'<p><img src="http://example.com" alt="this is an image" style="float:left;width:200px" /></p>'
>>> render("[[Your browser doesn't support <video> HTML5 tag http://example.com video]]")
'<p><video controls="controls"><source src="http://example.com" />Your browser doesn\\'t support <video> HTML5 tag</video></p>'
>>> render("[[Your browser doesn't support <audio> HTML5 tag http://example.com audio]]")
'<p><audio controls="controls"><source src="http://example.com" />Your browser doesn\\'t support <audio> HTML5 tag</audio></p>'
>>> render("[[Your\\nbrowser\\ndoesn't\\nsupport\\n<audio> HTML5 tag http://exam\\\\\\nple.com\\naudio]]")
'<p><audio controls="controls"><source src="http://example.com" />Your browser doesn\\'t support <audio> HTML5 tag</audio></p>'
>>> render('[[this is a **link** http://example.com]]')
'<p><a href="http://example.com">this is a <strong>link</strong></a></p>'
>>> render("``aaa``:custom", extra=dict(custom=lambda text: 'x'+text+'x'))
'xaaax'
>>> print render(r"$$\int_a^b sin(x)dx$$")
<img src="http://chart.apis.google.com/chart?cht=tx&chl=\\int_a^b sin(x)dx" />
>>> markmin2html(r"use backslash: \[\[[[mess\[[ag\]]e link]]\]]")
'<p>use backslash: [[<a href="link">mess[[ag]]e</a>]]</p>'
>>> markmin2html("backslash instead of exclamation sign: \``probe``")
'<p>backslash instead of exclamation sign: ``probe``</p>'
>>> render(r"simple image: [[\[[this is an image\]] http://example.com IMG]]!!!")
'<p>simple image: <img src="http://example.com" alt="[[this is an image]]" />!!!</p>'
>>> render(r"simple link no anchor with popup: [[ http://example.com popup]]")
'<p>simple link no anchor with popup: <a href="http://example.com" target="_blank">http://example.com</a></p>'
>>> render("auto-url: http://example.com")
'<p>auto-url: <a href="http://example.com">http://example.com</a></p>'
>>> render("auto-image: (http://example.com/image.jpeg)")
'<p>auto-image: (<img src="http://example.com/image.jpeg" controls />)</p>'
>>> render("qr: (qr:http://example.com/image.jpeg)")
'<p>qr: (<img style="width:100px" src="http://chart.apis.google.com/chart?cht=qr&chs=100x100&chl=http://example.com/image.jpeg&choe=UTF-8&chld=H" alt="QR Code" title="QR Code" />)</p>'
>>> render("embed: (embed:http://example.com/page)")
'<p>embed: (<iframe src="http://example.com/page" frameborder="0" allowfullscreen></iframe>)</p>'
>>> render("iframe: (iframe:http://example.com/page)")
'<p>iframe: (<iframe src="http://example.com/page" frameborder="0" allowfullscreen></iframe>)</p>'
>>> render("title1: [[test message [simple \[test\] title] http://example.com ]] test")
'<p>title1: <a href="http://example.com" title="simple [test] title">test message</a> test</p>'
>>> render("title2: \[\[[[test message [simple title] http://example.com popup]]\]]")
'<p>title2: [[<a href="http://example.com" title="simple title" target="_blank">test message</a>]]</p>'
>>> render("title3: [[ [link w/o anchor but with title] http://www.example.com ]]")
'<p>title3: <a href="http://www.example.com" title="link w/o anchor but with title">http://www.example.com</a></p>'
>>> render("title4: [[ [simple title] http://www.example.com popup]]")
'<p>title4: <a href="http://www.example.com" title="simple title" target="_blank">http://www.example.com</a></p>'
>>> render("title5: [[test message [simple title] http://example.com IMG]]")
'<p>title5: <img src="http://example.com" alt="test message" title="simple title" /></p>'
>>> render("title6: [[[test message w/o title] http://example.com IMG]]")
'<p>title6: <img src="http://example.com" alt="[test message w/o title]" /></p>'
>>> render("title7: [[[this is not a title] [this is a title] http://example.com IMG]]")
'<p>title7: <img src="http://example.com" alt="[this is not a title]" title="this is a title" /></p>'
>>> render("title8: [[test message [title] http://example.com center]]")
'<p>title8: <p style="text-align:center"><img src="http://example.com" alt="test message" title="title" /></p></p>'
>>> render("title9: [[test message [title] http://example.com left]]")
'<p>title9: <img src="http://example.com" alt="test message" title="title" style="float:left" /></p>'
>>> render("title10: [[test message [title] http://example.com right 100px]]")
'<p>title10: <img src="http://example.com" alt="test message" title="title" style="float:right;width:100px" /></p>'
>>> render("title11: [[test message [title] http://example.com center 200px]]")
'<p>title11: <p style="text-align:center"><img src="http://example.com" alt="test message" title="title" style="width:200px" /></p></p>'
>>> render(r"\\[[probe]]")
'<p>[[probe]]</p>'
>>> render(r"\\\\[[probe]]")
'<p>\\\\<span class="anchor" id="markmin_probe"></span></p>'
>>> render(r"\\\\\\[[probe]]")
'<p>\\\\[[probe]]</p>'
>>> render(r"\\\\\\\\[[probe]]")
'<p>\\\\\\\\<span class="anchor" id="markmin_probe"></span></p>'
>>> render(r"\\\\\\\\\[[probe]]")
'<p>\\\\\\\\[[probe]]</p>'
>>> render(r"\\\\\\\\\\\[[probe]]")
'<p>\\\\\\\\\\\\<span class="anchor" id="markmin_probe"></span></p>'
>>> render("``[[ [\\[[probe\]\\]] URL\\[x\\]]]``:red[dummy_params]")
'<span style="color: red"><a href="URL[x]" title="[[probe]]">URL[x]</a></span>'
>>> render("the \\**text**")
'<p>the **text**</p>'
>>> render("the \\``text``")
'<p>the ``text``</p>'
>>> render("the \\\\''text''")
"<p>the ''text''</p>"
>>> render("the [[link [**with** ``<b>title</b>``:red] http://www.example.com]]")
'<p>the <a href="http://www.example.com" title="**with** ``<b>title</b>``:red">link</a></p>'
>>> render("the [[link \\[**without** ``<b>title</b>``:red\\] http://www.example.com]]")
'<p>the <a href="http://www.example.com">link [<strong>without</strong> <span style="color: red"><b>title</b></span>]</a></p>'
>>> render("aaa-META-``code``:text[]-LINK-[[link http://www.example.com]]-LINK-[[image http://www.picture.com img]]-end")
'<p>aaa-META-<code class="text">code</code>-LINK-<a href="http://www.example.com">link</a>-LINK-<img src="http://www.picture.com" alt="image" />-end</p>'
>>> render("[[<a>test</a> [<a>test2</a>] <a>text3</a>]]")
'<p><a href="<a>text3</a>" title="<a>test2</a>"><a>test</a></a></p>'
>>> render("[[<a>test</a> [<a>test2</a>] <a>text3</a> IMG]]")
'<p><img src="<a>text3</a>" alt="<a>test</a>" title="<a>test2</a>" /></p>'
>>> render("**bold** ''italic'' ~~strikeout~~")
'<p><strong>bold</strong> <em>italic</em> <del>strikeout</del></p>'
>>> render("this is ``a red on yellow text``:c[#FF0000:#FFFF00]")
'<p>this is <span style="color: #FF0000;background-color: #FFFF00;">a red on yellow text</span></p>'
>>> render("this is ``a text with yellow background``:c[:yellow]")
'<p>this is <span style="background-color: yellow;">a text with yellow background</span></p>'
>>> render("this is ``a colored text (RoyalBlue)``:color[rgb(65,105,225)]")
'<p>this is <span style="color: rgb(65,105,225);">a colored text (RoyalBlue)</span></p>'
>>> render("this is ``a green text``:color[green:]")
'<p>this is <span style="color: green;">a green text</span></p>'
>>> render("**@{probe:1}**", environment=dict(probe=lambda t:"test %s" % t))
'<p><strong>test 1</strong></p>'
>>> render('[[id1 [span **messag** in ''markmin''] ]] ... [[**link** to id [link\\\'s title] #mark1]]')
'<p><span class="anchor" id="markmin_id1">span <strong>messag</strong> in markmin</span> ... <a href="#markmin_mark1" title="link\\\'s title"><strong>link</strong> to id</a></p>'
>>> render('# Multiline[[NEWLINE]]\\n title\\nParagraph[[NEWLINE]]\\nwith breaks[[NEWLINE]]\\nin it')
'<h1>Multiline<br /> title</h1><p>Paragraph<br /> with breaks<br /> in it</p>'
>>> render("anchor with name 'NEWLINE': [[NEWLINE [ ] ]]")
'<p>anchor with name \\'NEWLINE\\': <span class="anchor" id="markmin_NEWLINE"></span></p>'
>>> render("anchor with name 'NEWLINE': [[NEWLINE [newline] ]]")
'<p>anchor with name \\'NEWLINE\\': <span class="anchor" id="markmin_NEWLINE">newline</span></p>'
"""
if autolinks=="default": autolinks = autolinks_simple
if protolinks=="default": protolinks = protolinks_simple
pp='\n' if pretty_print else ''
if isinstance(text,unicode):
text = text.encode('utf8')
text = str(text or '')
text = regex_backslash.sub(lambda m: m.group(1).translate(ttab_in), text)
text = text.replace('\x05','') # concatenate strings separated by \\n
if URL is not None:
text = replace_at_urls(text,URL)
if latex == 'google':
text = regex_dd.sub('``\g<latex>``:latex ', text)
#############################################################
# replace all blocks marked with ``...``:class[id] with META
# store them into segments; they will be treated as code
#############################################################
segments = []
def mark_code(m):
g = m.group(0)
if g in (META, DISABLED_META ):
segments.append((None, None, None, g))
return m.group()
elif g == '````':
segments.append((None, None, None, ''))
return m.group()
else:
c = m.group('c') or ''
p = m.group('p') or ''
if 'code' in allowed and not c in allowed['code']: c = ''
code = m.group('t').replace('!`!','`')
segments.append((code, c, p, m.group(0)))
return META
text = regex_code.sub(mark_code, text)
#############################################################
# replace all blocks marked with [[...]] with LINK
# store them into links; they will be treated as links
#############################################################
links = []
def mark_link(m):
links.append( None if m.group() == LINK
else m.group('s') )
return LINK
text = regex_link.sub(mark_link, text)
text = escape(text)
if protolinks:
text = regex_proto.sub(lambda m: protolinks(*m.group('p','k')), text)
if autolinks:
text = replace_autolinks(text,autolinks)
#############################################################
# normalize spaces
#############################################################
strings=text.split('\n')
def parse_title(t, s):
hlevel=str(len(t))
out.extend(etags[::-1])
out.append("<h%s>%s"%(hlevel,s))
etags[:]=["</h%s>%s"%(hlevel,pp)]
lev=0
ltags[:]=[]
tlev[:]=[]
return (lev, 'h')
def parse_list(t, p, s, tag, lev, mtag, lineno):
lent=len(t)
if lent<lev: # current item level < previous item level
while ltags[-1]>lent:
ltags.pop()
out.append(etags.pop())
lev=lent
tlev[lev:]=[]
if lent>lev: # current item level > previous item level
if lev==0: # previous line is not a list (paragraph or title)
out.extend(etags[::-1])
ltags[:]=[]
tlev[:]=[]
etags[:]=[]
if pend and mtag == '.': # paragraph in a list:
out.append(etags.pop())
ltags.pop()
for i in xrange(lent-lev):
out.append('<'+tag+'>'+pp)
etags.append('</'+tag+'>'+pp)
lev+=1
ltags.append(lev)
tlev.append(tag)
elif lent == lev:
if tlev[-1] != tag:
# type of list is changed (ul<=>ol):
for i in xrange(ltags.count(lent)):
ltags.pop()
out.append(etags.pop())
tlev[-1]=tag
out.append('<'+tag+'>'+pp)
etags.append('</'+tag+'>'+pp)
ltags.append(lev)
else:
if ltags.count(lev)>1:
out.append(etags.pop())
ltags.pop()
mtag='l'
out.append('<li>')
etags.append('</li>'+pp)
ltags.append(lev)
if s[:1] == '-':
(s, mtag, lineno) = parse_table_or_blockquote(s, mtag, lineno)
if p and mtag=='l':
(lev,mtag,lineno)=parse_point(t, s, lev, '', lineno)
else:
out.append(s)
return (lev, mtag, lineno)
def parse_point(t, s, lev, mtag, lineno):
""" paragraphs in lists """
lent=len(t)
if lent>lev:
return parse_list(t, '.', s, 'ul', lev, mtag, lineno)
elif lent<lev:
while ltags[-1]>lent:
ltags.pop()
out.append(etags.pop())
lev=lent
tlev[lev:]=[]
mtag=''
elif lent==lev:
if pend and mtag == '.':
out.append(etags.pop())
ltags.pop()
if br and mtag in ('l','.'):
out.append(br)
if s == META:
mtag = ''
else:
mtag = '.'
if s[:1] == '-':
(s, mtag, lineno) = parse_table_or_blockquote(s, mtag, lineno)
if mtag == '.':
out.append(pbeg)
if pend:
etags.append(pend)
ltags.append(lev)
out.append(s)
return (lev, mtag, lineno)
def parse_table_or_blockquote(s, mtag, lineno):
# check the next line. If the next line:
# - is empty -> this is an <hr /> tag
# - contains '|' -> table
# - contains other characters -> blockquote
if ( lineno+1 >= strings_len or
not (s.count('-') == len(s) and len(s)>3) ):
return (s, mtag, lineno)
lineno+=1
s = strings[lineno].strip()
if s:
if '|' in s:
# table
tout=[]
thead=[]
tbody=[]
rownum=0
t_id = ''
t_cls = ''
# parse table:
while lineno < strings_len:
s = strings[lineno].strip()
if s[:1] == '=':
if s.count('=')==len(s) and len(s)>3: # header or footer
if not thead: # if thead list is empty:
thead = tout
else:
tbody.extend(tout)
tout = []
rownum=0
lineno+=1
continue
m = regex_tq.match(s)
if m:
t_cls = m.group('c') or ''
t_id = m.group('p') or ''
break
if rownum % 2:
tr = '<tr class="even">'
else:
tr = '<tr class="first">' if rownum == 0 else '<tr>'
tout.append(tr+''.join(['<td%s>%s</td>'% \
(' class="num"'
if regex_num.match(f)
else '',
f.strip()
) for f in s.split('|')])+'</tr>'+pp)
rownum+=1
lineno+=1
t_cls = ' class="%s%s"'%(class_prefix, t_cls) if t_cls and t_cls != 'id' else ''
t_id = ' id="%s%s"'%(id_prefix, t_id) if t_id else ''
s = ''
if thead:
s += '<thead>'+pp+''.join([l for l in thead])+'</thead>'+pp
if not tbody: # tbody strings are in tout list
tbody = tout
tout = []
if tbody: # if tbody list is not empty:
s += '<tbody>'+pp+''.join([l for l in tbody])+'</tbody>'+pp
if tout: # tfoot is not empty:
s += '<tfoot>'+pp+''.join([l for l in tout])+'</tfoot>'+pp
s = '<table%s%s>%s%s</table>%s' % (t_cls, t_id, pp, s, pp)
mtag='t'
else:
# parse blockquote:
bq_begin=lineno
t_mode = False # embedded table
t_cls = ''
t_id = ''
# search blockquote closing line:
while lineno < strings_len:
s = strings[lineno].strip()
if not t_mode:
m = regex_tq.match(s)
if m:
if lineno+1 == strings_len or '|' not in strings[lineno+1]:
t_cls = m.group('c') or ''
t_id = m.group('p') or ''
break
if regex_bq_headline.match(s):
if lineno+1 < strings_len and strings[lineno+1].strip():
t_mode = True
lineno+=1
continue
elif regex_tq.match(s):
t_mode=False
lineno+=1
continue
lineno+=1
t_cls = ' class="%s%s"'%(class_prefix,t_cls) if t_cls and t_cls != 'id' else ''
t_id = ' id="%s%s"'%(id_prefix,t_id) if t_id else ''
s = '<blockquote%s%s>%s</blockquote>%s' \
% (t_cls,
t_id,
render('\n'.join(strings[bq_begin:lineno]),
extra,
allowed,
'br',
URL,
environment,
latex,
autolinks,
protolinks,
class_prefix,
id_prefix,
pretty_print),
pp
)
mtag='q'
else:
s = '<hr />'
lineno-=1
mtag='q'
return (s, 'q', lineno)
if sep == 'p':
pbeg = "<p>"
pend = "</p>"+pp
br = ''
else:
pbeg = pend = ''
br = "<br />"+pp if sep=='br' else ''
lev = 0 # nesting level of lists
c0 = '' # first character of current line
out = [] # list of processed lines
etags = [] # trailing tags
ltags = [] # level number corresponding to each trailing tag
tlev = [] # list of tags for each level ('ul' or 'ol')
mtag = '' # marked tag (~last tag) ('l','.','h','p','t'). Used to set <br/>
# and to avoid <p></p> around tables and blockquotes
lineno = 0
strings_len = len(strings)
while lineno < strings_len:
s0 = strings[lineno][:1]
s = strings[lineno].strip()
""" # + - . ---------------------
## ++ -- .. ------- field | field | field <-title
### +++ --- ... quote =====================
#### ++++ ---- .... ------- field | field | field <-body
##### +++++ ----- ..... ---------------------:class[id]
"""
pc0=c0 # first character of previous line
c0=s[:1]
if c0: # for non empty strings
if c0 in "#+-.": # first character is one of: # + - .
(t1,t2,p,ss) = regex_list.findall(s)[0]
# t1 - tag ("###")
# t2 - tag ("+++", "---", "...")
# p - paragraph point ('.')->for "++." or "--."
# ss - other part of string
if t1 or t2:
# headers and lists:
if c0 == '#': # headers
(lev, mtag) = parse_title(t1, ss)
lineno+=1
continue
elif c0 == '+': # ordered list
(lev, mtag, lineno)= parse_list(t2, p, ss, 'ol', lev, mtag, lineno)
lineno+=1
continue
elif c0 == '-': # unordered list, table or blockquote
if p or ss:
(lev, mtag, lineno) = parse_list(t2, p, ss, 'ul', lev, mtag, lineno)
lineno+=1
continue
else:
(s, mtag, lineno) = parse_table_or_blockquote(s, mtag, lineno)
elif lev>0: # and c0 == '.' # paragraph in lists
(lev, mtag, lineno) = parse_point(t2, ss, lev, mtag, lineno)
lineno+=1
continue
if lev == 0 and (mtag == 'q' or s == META):
# new paragraph
pc0=''
if pc0 == '' or (mtag != 'p' and s0 not in (' ','\t')):
# paragraph
out.extend(etags[::-1])
etags=[]
ltags=[]
tlev=[]
lev=0
if br and mtag == 'p': out.append(br)
if mtag != 'q' and s != META:
if pend: etags=[pend]
out.append(pbeg)
mtag = 'p'
else:
mtag = ''
out.append(s)
else:
if lev>0 and mtag=='.' and s == META:
out.append(etags.pop())
ltags.pop()
out.append(s)
mtag = ''
else:
out.append(' '+s)
lineno+=1
out.extend(etags[::-1])
text = ''.join(out)
#############################################################
# do strong,em,del
#############################################################
text = regex_strong.sub('<strong>\g<t></strong>', text)
text = regex_del.sub('<del>\g<t></del>', text)
text = regex_em.sub('<em>\g<t></em>', text)
#############################################################
# deal with images, videos, audios and links
#############################################################
def sub_media(m):
t,a,k,p,w = m.group('t','a','k','p','w')
if not k:
return m.group(0)
k = escape(k)
t = t or ''
style = 'width:%s' % w if w else ''
title = ' title="%s"' % escape(a).replace(META, DISABLED_META) if a else ''
p_begin = p_end = ''
if p == 'center':
p_begin = '<p style="text-align:center">'
p_end = '</p>'+pp
elif p == 'blockleft':
p_begin = '<p style="text-align:left">'
p_end = '</p>'+pp
elif p == 'blockright':
p_begin = '<p style="text-align:right">'
p_end = '</p>'+pp
elif p in ('left','right'):
style = ('float:%s' % p)+(';%s' % style if style else '')
if t and regex_auto.match(t):
p_begin = p_begin + '<a href="%s">' % t
p_end = '</a>' + p_end
t = ''
if style:
style = ' style="%s"' % style
if p in ('video','audio'):
t = render(t, {}, {}, 'br', URL, environment, latex,
autolinks, protolinks, class_prefix, id_prefix, pretty_print)
return '<%(p)s controls="controls"%(title)s%(style)s><source src="%(k)s" />%(t)s</%(p)s>' \
% dict(p=p, title=title, style=style, k=k, t=t)
alt = ' alt="%s"'%escape(t).replace(META, DISABLED_META) if t else ''
return '%(begin)s<img src="%(k)s"%(alt)s%(title)s%(style)s />%(end)s' \
% dict(begin=p_begin, k=k, alt=alt, title=title, style=style, end=p_end)
def sub_link(m):
t,a,k,p = m.group('t','a','k','p')
if not k and not t:
return m.group(0)
t = t or ''
a = escape(a) if a else ''
if k:
if '#' in k and not ':' in k.split('#')[0]:
# wikipage, not external url
k=k.replace('#','#'+id_prefix)
k = escape(k)
title = ' title="%s"' % a.replace(META, DISABLED_META) if a else ''
target = ' target="_blank"' if p == 'popup' else ''
t = render(t, {}, {}, 'br', URL, environment, latex, None,
None, class_prefix, id_prefix, pretty_print) if t else k
return '<a href="%(k)s"%(title)s%(target)s>%(t)s</a>' \
% dict(k=k, title=title, target=target, t=t)
if t == 'NEWLINE' and not a:
return '<br />'+pp
return '<span class="anchor" id="%s">%s</span>' % (
escape(id_prefix+t),
render(a, {},{},'br', URL,
environment, latex, autolinks,
protolinks, class_prefix,
id_prefix, pretty_print))
parts = text.split(LINK)
text = parts[0]
for i,s in enumerate(links):
if s == None:
html = LINK
else:
html = regex_media_level2.sub(sub_media, s)
if html == s:
html = regex_link_level2.sub(sub_link, html)
if html == s:
# return unprocessed string as a signal of an error
html = '[[%s]]'%s
text += html + parts[i+1]
#############################################################
# process all code text
#############################################################
def expand_meta(m):
code,b,p,s = segments.pop(0)
if code==None or m.group() == DISABLED_META:
return escape(s)
if b in extra:
if code[:1]=='\n': code=code[1:]
if code[-1:]=='\n': code=code[:-1]
if p:
return str(extra[b](code,p))
else:
return str(extra[b](code))
elif b=='cite':
return '['+','.join('<a href="#%s" class="%s">%s</a>' \
% (d,b,d) \
for d in escape(code).split(','))+']'
elif b=='latex':
return LATEX % code.replace('"','\\"').replace('\n',' ')
elif b in html_colors:
return '<span style="color: %s">%s</span>' \
% (b, render(code, {}, {}, 'br', URL, environment, latex,
autolinks, protolinks, class_prefix, id_prefix, pretty_print))
elif b in ('c', 'color') and p:
c=p.split(':')
fg='color: %s;' % c[0] if c[0] else ''
bg='background-color: %s;' % c[1] if len(c)>1 and c[1] else ''
return '<span style="%s%s">%s</span>' \
% (fg, bg, render(code, {}, {}, 'br', URL, environment, latex,
autolinks, protolinks, class_prefix, id_prefix, pretty_print))
cls = ' class="%s%s"'%(class_prefix,b) if b and b != 'id' else ''
id = ' id="%s%s"'%(id_prefix,escape(p)) if p else ''
beg=(code[:1]=='\n')
end=[None,-1][code[-1:]=='\n']
if beg and end:
return '<pre><code%s%s>%s</code></pre>%s' % (cls, id, escape(code[1:-1]), pp)
return '<code%s%s>%s</code>' % (cls, id, escape(code[beg:end]))
text = regex_expand_meta.sub(expand_meta, text)
if environment:
text = replace_components(text,environment)
return text.translate(ttab_out)
def markmin2html(text, extra={}, allowed={}, sep='p',
autolinks='default', protolinks='default',
class_prefix='', id_prefix='markmin_', pretty_print=False):
return render(text, extra, allowed, sep,
autolinks=autolinks, protolinks=protolinks,
class_prefix=class_prefix, id_prefix=id_prefix,
pretty_print=pretty_print)
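# Usage sketch (mirrors the doctests in render above):
#   markmin2html("# Title\n\nSome **bold** text")
#   # -> '<h1>Title</h1><p>Some <strong>bold</strong> text</p>'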
def run_doctests():
import doctest
doctest.testmod()
if __name__ == '__main__':
import sys
import doctest
from textwrap import dedent
html=dedent("""
<!doctype html>
<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">
<head>
<meta http-equiv="content-type" content="text/html; charset=utf-8" />
%(style)s
<title>%(title)s</title>
</head>
<body>
%(body)s
</body>
</html>""")[1:]
if sys.argv[1:2] == ['-h']:
style=dedent("""
<style>
blockquote { background-color: #FFFAAE; padding: 7px; }
table { border-collapse: collapse; }
thead td { border-bottom: 1px solid; }
tfoot td { border-top: 1px solid; }
.tableclass1 { background-color: lime; }
.tableclass1 thead { color: yellow; background-color: green; }
.tableclass1 tfoot { color: yellow; background-color: green; }
.tableclass1 .even td { background-color: #80FF7F; }
.tableclass1 .first td {border-top: 1px solid; }
td.num { text-align: right; }
pre { background-color: #E0E0E0; padding: 5px; }
</style>""")[1:]
print html % dict(title="Markmin markup language",
style=style,
body=markmin2html(__doc__, pretty_print=True))
elif sys.argv[1:2] == ['-t']:
from timeit import Timer
loops=1000
ts = Timer("markmin2html(__doc__)","from markmin2html import markmin2html")
print 'timeit "markmin2html(__doc__)":'
t = min([ts.timeit(loops) for i in range(3)])
print "%s loops, best of 3: %.3f ms per loop" % (loops, t/1000*loops)
elif len(sys.argv) > 1:
fargv = open(sys.argv[1],'r')
try:
markmin_text=fargv.read()
# embed css file from second parameter into html file
if len(sys.argv) > 2:
if sys.argv[2].startswith('@'):
markmin_style = '<link rel="stylesheet" href="'+sys.argv[2][1:]+'"/>'
else:
fargv2 = open(sys.argv[2],'r')
try:
markmin_style = "<style>\n" + fargv2.read() + "</style>"
finally:
fargv2.close()
else:
markmin_style = ""
print html % dict(title=sys.argv[1], style=markmin_style,
body=markmin2html(markmin_text, pretty_print=True))
finally:
fargv.close()
else:
print "Usage: "+sys.argv[0]+" -h | -t | file.markmin [file.css|@path_to/css]"
print "where: -h - print __doc__"
print " -t - timeit __doc__ (for testing purpuse only)"
print " file.markmin [file.css] - process file.markmin + built in file.css (optional)"
print " file.markmin [@path_to/css] - process file.markmin + link path_to/css (optional)"
run_doctests()
| Python |
#!/usr/bin/env python
# created by Massimo Di Pierro
# license MIT/BSD/GPL
import re
import cgi
import sys
import doctest
from optparse import OptionParser
__all__ = ['render','markmin2latex']
META = 'META'
regex_newlines = re.compile('(\n\r)|(\r\n)')
regex_dd=re.compile('\$\$(?P<latex>.*?)\$\$')
regex_code = re.compile('('+META+')|(``(?P<t>.*?)``(:(?P<c>\w+))?)',re.S)
regex_title = re.compile('^#{1} (?P<t>[^\n]+)',re.M)
regex_maps = [
(re.compile('[ \t\r]+\n'),'\n'),
(re.compile('\*\*(?P<t>[^\s\*]+( +[^\s\*]+)*)\*\*'),'{\\\\bf \g<t>}'),
(re.compile("''(?P<t>[^\s']+( +[^\s']+)*)''"),'{\\it \g<t>}'),
(re.compile('^#{6} (?P<t>[^\n]+)',re.M),'\n\n{\\\\bf \g<t>}\n'),
(re.compile('^#{5} (?P<t>[^\n]+)',re.M),'\n\n{\\\\bf \g<t>}\n'),
(re.compile('^#{4} (?P<t>[^\n]+)',re.M),'\n\n\\\\goodbreak\\subsubsection{\g<t>}\n'),
(re.compile('^#{3} (?P<t>[^\n]+)',re.M),'\n\n\\\\goodbreak\\subsection{\g<t>}\n'),
(re.compile('^#{2} (?P<t>[^\n]+)',re.M),'\n\n\\\\goodbreak\\section{\g<t>}\n'),
(re.compile('^#{1} (?P<t>[^\n]+)',re.M),''),
(re.compile('^\- +(?P<t>.*)',re.M),'\\\\begin{itemize}\n\\item \g<t>\n\\end{itemize}'),
(re.compile('^\+ +(?P<t>.*)',re.M),'\\\\begin{itemize}\n\\item \g<t>\n\\end{itemize}'),
(re.compile('\\\\end\{itemize\}\s+\\\\begin\{itemize\}'),'\n'),
(re.compile('\n\s+\n'),'\n\n')]
regex_table = re.compile('^\-{4,}\n(?P<t>.*?)\n\-{4,}(:(?P<c>\w+))?\n',re.M|re.S)
regex_anchor = re.compile('\[\[(?P<t>\S+)\]\]')
regex_bibitem = re.compile('\-\s*\[\[(?P<t>\S+)\]\]')
regex_image_width = re.compile('\[\[(?P<t>[^\]]*?) +(?P<k>\S+) +(?P<p>left|right|center) +(?P<w>\d+px)\]\]')
regex_image = re.compile('\[\[(?P<t>[^\]]*?) +(?P<k>\S+) +(?P<p>left|right|center)\]\]')
#regex_video = re.compile('\[\[(?P<t>[^\]]*?) +(?P<k>\S+) +video\]\]')
#regex_audio = re.compile('\[\[(?P<t>[^\]]*?) +(?P<k>\S+) +audio\]\]')
regex_link = re.compile('\[\[(?P<t>[^\]]*?) +(?P<k>\S+)\]\]')
regex_auto = re.compile('(?<!["\w])(?P<k>\w+://[\w\.\-\?&%\:]+)',re.M)
regex_commas = re.compile('[ ]+(?P<t>[,;\.])')
regex_noindent = re.compile('\n\n(?P<t>[a-z])')
#regex_quote_left = re.compile('"(?=\w)')
#regex_quote_right = re.compile('(?=\w\.)"')
def latex_escape(text,pound=True):
text=text.replace('\\','{\\textbackslash}')
for c in '^_&$%{}': text=text.replace(c,'\\'+c)
text=text.replace('\\{\\textbackslash\\}','{\\textbackslash}')
if pound: text=text.replace('#','\\#')
return text
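# Usage sketch: latex_escape('50% of $x_1') returns '50\% of \$x\_1',
# so the text can be embedded safely in a LaTeX document.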
def render(text,
extra={},
allowed={},
sep='p',
image_mapper=lambda x:x,
chapters=False):
#############################################################
# replace all blocks marked with ``...``:class with META
# store them into segments; they will be treated as code
#############################################################
text = str(text or '')
segments, i = [], 0
text = regex_dd.sub('``\g<latex>``:latex ',text)
text = regex_newlines.sub('\n',text)
while True:
item = regex_code.search(text,i)
if not item: break
if item.group()==META:
segments.append((None,None))
text = text[:item.start()]+META+text[item.end():]
else:
c = item.group('c') or ''
if 'code' in allowed and not c in allowed['code']: c = ''
code = item.group('t').replace('!`!','`')
segments.append((code,c))
text = text[:item.start()]+META+text[item.end():]
i=item.start()+3
#############################################################
# do h1,h2,h3,h4,h5,h6,b,i,ol,ul and normalize spaces
#############################################################
title = regex_title.search(text)
if not title: title='Title'
else: title=title.group('t')
text = latex_escape(text,pound=False)
texts = text.split('## References',1)
text = regex_anchor.sub('\\label{\g<t>}', texts[0])
if len(texts)==2:
text += '\n\\begin{thebibliography}{999}\n'
text += regex_bibitem.sub('\n\\\\bibitem{\g<t>}', texts[1])
text += '\n\\end{thebibliography}\n'
text = '\n'.join(t.strip() for t in text.split('\n'))
for regex, sub in regex_maps:
text = regex.sub(sub,text)
text=text.replace('#','\\#')
text=text.replace('`',"'")
#############################################################
# process tables and blockquotes
#############################################################
while True:
item = regex_table.search(text)
if not item: break
c = item.group('c') or ''
if 'table' in allowed and not c in allowed['table']: c = ''
content = item.group('t')
if ' | ' in content:
rows = content.replace('\n','\\\\\n').replace(' | ',' & ')
row0,row2 = rows.split('\\\\\n',1)
cols=row0.count(' & ')+1
cal='{'+''.join('l' for j in range(cols))+'}'
tabular = '\\begin{center}\n{\\begin{tabular}'+cal+'\\hline\n' + row0+'\\\\ \\hline\n'+row2 + ' \\\\ \\hline\n\\end{tabular}}\n\\end{center}'
if row2.count('\n')>20: tabular='\\newpage\n'+tabular
text = text[:item.start()] + tabular + text[item.end():]
else:
text = text[:item.start()] + '\\begin{quote}' + content + '\\end{quote}' + text[item.end():]
#############################################################
# deal with images, videos, audios and links
#############################################################
def sub(x):
f=image_mapper(x.group('k'))
if not f: return '' # re.sub replacements must be strings; drop the image if the mapper rejects it
return '\n\\begin{center}\\includegraphics[width=8cm]{%s}\\end{center}\n' % (f)
text = regex_image_width.sub(sub,text)
text = regex_image.sub(sub,text)
text = regex_link.sub('{\\\\footnotesize\\href{\g<k>}{\g<t>}}', text)
text = regex_commas.sub('\g<t>',text)
text = regex_noindent.sub('\n\\\\noindent \g<t>',text)
### fix paths in images
regex=re.compile('\\\\_\w*\.(eps|png|jpg|gif)')
while True:
match=regex.search(text)
if not match: break
text=text[:match.start()]+text[match.start()+1:]
#text = regex_quote_left.sub('``',text)
#text = regex_quote_right.sub("''",text)
if chapters:
text=text.replace(r'\section*{',r'\chapter*{')
text=text.replace(r'\section{',r'\chapter{')
text=text.replace(r'subsection{',r'section{')
#############################################################
# process all code text
#############################################################
parts = text.split(META)
text = parts[0]
authors = []
for i,(code,b) in enumerate(segments):
if code==None:
html = META
else:
if b=='hidden':
html=''
elif b=='author':
author = latex_escape(code.strip())
authors.append(author)
html=''
elif b=='inxx':
html='\inxx{%s}' % latex_escape(code)
elif b=='cite':
html='~\cite{%s}' % latex_escape(code.strip())
elif b=='ref':
html='~\ref{%s}' % latex_escape(code.strip())
elif b=='latex':
if '\n' in code:
html='\n\\begin{equation}\n%s\n\\end{equation}\n' % code.strip()
else:
html='$%s$' % code.strip()
elif b=='latex_eqnarray':
code=code.strip()
code='\\\\'.join(x.replace('=','&=&',1) for x in code.split('\\\\'))
html='\n\\begin{eqnarray}\n%s\n\\end{eqnarray}\n' % code
elif b.startswith('latex_'):
key=b[6:]
html='\\begin{%s}%s\\end{%s}' % (key,code,key)
elif b in extra:
if code[:1]=='\n': code=code[1:]
if code[-1:]=='\n': code=code[:-1]
html = extra[b](code)
elif code[:1]=='\n' or code[-1:]=='\n':
if code[:1]=='\n': code=code[1:]
if code[-1:]=='\n': code=code[:-1]
if code.startswith('<') or code.startswith('{{') or code.startswith('http'):
html = '\\begin{lstlisting}[keywords={}]\n%s\n\\end{lstlisting}' % code
else:
html = '\\begin{lstlisting}\n%s\n\\end{lstlisting}' % code
else:
if code[:1]=='\n': code=code[1:]
if code[-1:]=='\n': code=code[:-1]
html = '{\\ft %s}' % latex_escape(code)
try:
text = text+html+parts[i+1]
except:
text = text + '... WIKI PROCESSING ERROR ...'
break
text = text.replace(' ~\\cite','~\\cite')
return text, title, authors
WRAPPER = """
\\documentclass[12pt]{article}
\\usepackage{hyperref}
\\usepackage{listings}
\\usepackage{upquote}
\\usepackage{color}
\\usepackage{graphicx}
\\usepackage{grffile}
\\usepackage[utf8x]{inputenc}
\\definecolor{lg}{rgb}{0.9,0.9,0.9}
\\definecolor{dg}{rgb}{0.3,0.3,0.3}
\\def\\ft{\\small\\tt}
\\lstset{
basicstyle=\\footnotesize,
breaklines=true, basicstyle=\\ttfamily\\color{black}\\footnotesize,
keywordstyle=\\bf\\ttfamily,
commentstyle=\\it\\ttfamily,
stringstyle=\\color{dg}\\it\\ttfamily,
numbers=left, numberstyle=\\color{dg}\\tiny, stepnumber=1, numbersep=5pt,
backgroundcolor=\\color{lg}, tabsize=4, showspaces=false,
showstringspaces=false
}
\\title{%(title)s}
\\author{%(author)s}
\\begin{document}
\\maketitle
\\tableofcontents
\\newpage
%(body)s
\\end{document}
"""
def markmin2latex(data, image_mapper=lambda x:x, extra={},
wrapper=WRAPPER, chapters=False):
body, title, authors = render(data, extra=extra, image_mapper=image_mapper, chapters=chapters)
author = '\n\\and\n'.join(a.replace('\n','\\\\\n\\footnotesize ') for a in authors)
return wrapper % dict(title=title, author=author, body=body)
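# Usage sketch:
#   latex_doc = markmin2latex("# Title\n\nSome **bold** text")
#   # latex_doc is a complete LaTeX document: WRAPPER with title, author and body filled in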
if __name__ == '__main__':
parser = OptionParser()
parser.add_option("-i", "--info", dest="info",
help="markmin help")
parser.add_option("-t", "--test", dest="test", action="store_true",
default=False)
parser.add_option("-n", "--no_wrapper", dest="no_wrapper",
action="store_true",default=False)
parser.add_option("-c", "--chapters", dest="chapters",action="store_true",
default=False,help="switch section for chapter")
parser.add_option("-w", "--wrapper", dest="wrapper", default=False,
help="latex file containing header and footer")
(options, args) = parser.parse_args()
if options.info:
import markmin2html
markmin2latex(markmin2html.__doc__)
elif options.test:
doctest.testmod()
else:
if options.wrapper:
fwrapper = open(options.wrapper,'rb')
try:
wrapper = fwrapper.read()
finally:
fwrapper.close()
elif options.no_wrapper:
wrapper = '%(body)s'
else:
wrapper = WRAPPER
for f in args:
fargs = open(f,'r')
content_data = []
try:
content_data.append(fargs.read())
finally:
fargs.close()
content = '\n'.join(content_data)
output= markmin2latex(content,
wrapper=wrapper,
chapters=options.chapters)
print output
| Python |
"""
Created by Massimo Di Pierro
License: BSD
"""
import subprocess
import os
import os.path
import re
import sys
from tempfile import mkstemp, mkdtemp, NamedTemporaryFile
from markmin2latex import markmin2latex
__all__ = ['markmin2pdf']
def removeall(path):
ERROR_STR= """Error removing %(path)s, %(error)s """
def rmgeneric(path, __func__):
try:
__func__(path)
except OSError, (errno, strerror):
print ERROR_STR % {'path' : path, 'error': strerror }
files=[path]
while files:
file=files[0]
if os.path.isfile(file):
f=os.remove
rmgeneric(file, os.remove)
del files[0]
elif os.path.isdir(file):
nested = os.listdir(file)
if not nested:
rmgeneric(file, os.rmdir)
del files[0]
else:
files = [os.path.join(file,x) for x in nested] + files
def latex2pdf(latex, pdflatex='pdflatex', passes=3):
"""
calls pdflatex in a tempfolder
Arguments:
- pdflatex: path to the pdflatex command. Default is just 'pdflatex'.
- passes: how many times pdflatex should be run on the tex file.
"""
warnings=[]
# set up the environment
tmpdir = mkdtemp()
texfile = open(tmpdir+'/test.tex','wb')
texfile.write(latex)
texfile.seek(0)
texfile.close()
texfile = os.path.abspath(texfile.name)
# start doing some work
for i in range(0, passes):
logfd,logname = mkstemp()
outfile=os.fdopen(logfd)
try:
ret = subprocess.call([pdflatex,
'-interaction=nonstopmode',
'-output-format', 'pdf',
'-output-directory', tmpdir,
texfile],
cwd=os.path.dirname(texfile), stdout=outfile,
stderr=subprocess.PIPE)
finally:
outfile.close()
re_errors=re.compile('^\!(.*)$',re.M)
re_warnings=re.compile('^LaTeX Warning\:(.*)$',re.M)
flog = open(logname)
try:
loglines = flog.read()
finally:
flog.close()
errors=re_errors.findall(loglines)
warnings=re_warnings.findall(loglines)
os.unlink(logname)
pdffile=texfile.rsplit('.',1)[0]+'.pdf'
if os.path.isfile(pdffile):
fpdf = open(pdffile, 'rb')
try:
data = fpdf.read()
finally:
fpdf.close()
else:
data = None
removeall(tmpdir)
return data, warnings, errors
def markmin2pdf(text, image_mapper=lambda x: None, extra={}):
return latex2pdf(markmin2latex(text,image_mapper=image_mapper, extra=extra))
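# Usage sketch (requires pdflatex on the PATH):
#   data, warnings, errors = markmin2pdf("# Title\n\nSome **bold** text")
#   # data holds the PDF bytes, or None if compilation failed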
if __name__ == '__main__':
import sys
import doctest
import markmin2html
if sys.argv[1:2]==['-h']:
data, warnings, errors = markmin2pdf(markmin2html.__doc__)
if errors:
print 'ERRORS:'+'\n'.join(errors)
print 'WARNINGS:'+'\n'.join(warnings)
else:
print data
elif len(sys.argv)>1:
fargv = open(sys.argv[1],'rb')
try:
data, warnings, errors = markmin2pdf(fargv.read())
finally:
fargv.close()
if errors:
print 'ERRORS:'+'\n'.join(errors)
print 'WARNINGS:'+'\n'.join(warnings)
else:
print data
else:
doctest.testmod()
| Python |
# this file exists for backward compatibility
__all__ = ['DAL', 'Field', 'drivers', 'gae']
from gluon.dal import DAL, Field, Table, Query, Set, Expression, Row, Rows, drivers, BaseAdapter, SQLField, SQLTable, SQLXorable, SQLQuery, SQLSet, SQLRows, SQLStorage, SQLDB, GQLDB, SQLALL, SQLCustomType, gae
| Python |
from gluon import XML
def button(merchant_id="123456789012345",
products=[dict(name="shoes",
quantity=1,
price=23.5,
currency='USD',
description="running shoes black")]):
t = '<input name="item_%(key)s_%(k)s" type="hidden" value="%(value)s"/>\n'
list_products = ''
for k, product in enumerate(products):
for key in ('name','description','quantity','price','currency'):
list_products += t % dict(k=k + 1, key=key, value=product[key])
button = """<form action="https://checkout.google.com/api/checkout/v2/checkoutForm/Merchant/%(merchant_id)s" id="BB_BuyButtonForm" method="post" name="BB_BuyButtonForm" target="_top">\n%(list_products)s<input name="_charset_" type="hidden" value="utf-8"/>\n<input alt="" src="https://checkout.google.com/buttons/buy.gif?merchant_id=%(merchant_id)s&w=117&h=48&style=white&variant=text&loc=en_US" type="image"/>\n</form>""" % dict(merchant_id=merchant_id, list_products=list_products)
return XML(button)
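# Usage sketch (merchant id and product data are illustrative):
#   pay = button(merchant_id="123456789012345",
#                products=[dict(name="shoes", quantity=1, price=23.5,
#                               currency='USD', description="running shoes black")])
#   # pay is an XML helper wrapping the Google Checkout buy-button <form>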
| Python |
# -*- coding: utf-8 -*-
import struct
import re
try:
import cStringIO as StringIO
except ImportError:
import StringIO
from err import Warning, Error, InterfaceError, DataError, \
DatabaseError, OperationalError, IntegrityError, InternalError, \
NotSupportedError, ProgrammingError
insert_values = re.compile(r'\svalues\s*(\(.+\))', re.IGNORECASE)
class Cursor(object):
'''
This is the object you use to interact with the database.
'''
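# Typical usage (illustrative query; `conn` is a Connection created elsewhere):
#   cur = conn.cursor()
#   cur.execute("SELECT * FROM users WHERE id = %s", (1,))
#   row = cur.fetchone()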
def __init__(self, connection):
'''
Do not create an instance of a Cursor yourself. Call
connections.Connection.cursor().
'''
from weakref import proxy
self.connection = proxy(connection)
self.description = None
self.rownumber = 0
self.rowcount = -1
self.arraysize = 1
self._executed = None
self.messages = []
self.errorhandler = connection.errorhandler
self._has_next = None
self._rows = ()
def __del__(self):
'''
When this gets GC'd close it.
'''
self.close()
def close(self):
'''
Closing a cursor just exhausts all remaining data.
'''
if not self.connection:
return
try:
while self.nextset():
pass
except:
pass
self.connection = None
def _get_db(self):
if not self.connection:
self.errorhandler(self, ProgrammingError, "cursor closed")
return self.connection
def _check_executed(self):
if not self._executed:
self.errorhandler(self, ProgrammingError, "execute() first")
def setinputsizes(self, *args):
"""Does nothing, required by DB API."""
def setoutputsizes(self, *args):
"""Does nothing, required by DB API."""
def nextset(self):
''' Get the next query set '''
if self._executed:
self.fetchall()
del self.messages[:]
if not self._has_next:
return None
connection = self._get_db()
connection.next_result()
self._do_get_result()
return True
def execute(self, query, args=None):
''' Execute a query '''
from sys import exc_info
conn = self._get_db()
charset = conn.charset
del self.messages[:]
# TODO: make sure that conn.escape is correct
if isinstance(query, unicode):
query = query.encode(charset)
if args is not None:
if isinstance(args, tuple) or isinstance(args, list):
escaped_args = tuple(conn.escape(arg) for arg in args)
elif isinstance(args, dict):
escaped_args = dict((key, conn.escape(val)) for (key, val) in args.items())
else:
# If it's not a dict or a sequence, try escaping it anyway.
# Worst case it will throw a ValueError
escaped_args = conn.escape(args)
query = query % escaped_args
result = 0
try:
result = self._query(query)
except:
exc, value, tb = exc_info()
del tb
self.messages.append((exc,value))
self.errorhandler(self, exc, value)
self._executed = query
return result
def executemany(self, query, args):
''' Run the same query against several parameter sets '''
del self.messages[:]
#conn = self._get_db()
if not args:
return
#charset = conn.charset
#if isinstance(query, unicode):
# query = query.encode(charset)
self.rowcount = sum([ self.execute(query, arg) for arg in args ])
return self.rowcount
def callproc(self, procname, args=()):
"""Execute stored procedure procname with args
procname -- string, name of procedure to execute on server
args -- Sequence of parameters to use with procedure
Returns the original args.
Compatibility warning: PEP-249 specifies that any modified
parameters must be returned. This is currently impossible
as they are only available by storing them in a server
variable and then retrieved by a query. Since stored
procedures return zero or more result sets, there is no
reliable way to get at OUT or INOUT parameters via callproc.
The server variables are named @_procname_n, where procname
is the parameter above and n is the position of the parameter
(from zero). Once all result sets generated by the procedure
have been fetched, you can issue a SELECT @_procname_0, ...
query using .execute() to get any OUT or INOUT values.
Compatibility warning: The act of calling a stored procedure
itself creates an empty result set. This appears after any
result sets generated by the procedure. This is non-standard
behavior with respect to the DB-API. Be sure to use nextset()
to advance through all result sets; otherwise you may get
disconnected.
"""
conn = self._get_db()
for index, arg in enumerate(args):
q = "SET @_%s_%d=%s" % (procname, index, conn.escape(arg))
if isinstance(q, unicode):
q = q.encode(conn.charset)
self._query(q)
self.nextset()
q = "CALL %s(%s)" % (procname,
','.join(['@_%s_%d' % (procname, i)
for i in range(len(args))]))
if isinstance(q, unicode):
q = q.encode(conn.charset)
self._query(q)
self._executed = q
return args
def fetchone(self):
''' Fetch the next row '''
self._check_executed()
if self._rows is None or self.rownumber >= len(self._rows):
return None
result = self._rows[self.rownumber]
self.rownumber += 1
return result
def fetchmany(self, size=None):
''' Fetch several rows '''
self._check_executed()
if self._rows is None:
return None
end = self.rownumber + (size or self.arraysize)
result = self._rows[self.rownumber:end]
self.rownumber = min(end, len(self._rows))
return result
def fetchall(self):
''' Fetch all the rows '''
self._check_executed()
if self._rows is None:
return None
if self.rownumber:
result = self._rows[self.rownumber:]
else:
result = self._rows
self.rownumber = len(self._rows)
return result
def scroll(self, value, mode='relative'):
self._check_executed()
if mode == 'relative':
r = self.rownumber + value
elif mode == 'absolute':
r = value
else:
self.errorhandler(self, ProgrammingError,
"unknown scroll mode %s" % mode)
if r < 0 or r >= len(self._rows):
self.errorhandler(self, IndexError, "out of range")
self.rownumber = r
def _query(self, q):
conn = self._get_db()
self._last_executed = q
conn.query(q)
self._do_get_result()
return self.rowcount
def _do_get_result(self):
conn = self._get_db()
self.rowcount = conn._result.affected_rows
self.rownumber = 0
self.description = conn._result.description
self.lastrowid = conn._result.insert_id
self._rows = conn._result.rows
self._has_next = conn._result.has_next
def __iter__(self):
return iter(self.fetchone, None)
Warning = Warning
Error = Error
InterfaceError = InterfaceError
DatabaseError = DatabaseError
DataError = DataError
OperationalError = OperationalError
IntegrityError = IntegrityError
InternalError = InternalError
ProgrammingError = ProgrammingError
NotSupportedError = NotSupportedError
class DictCursor(Cursor):
"""A cursor which returns results as a dictionary"""
def execute(self, query, args=None):
result = super(DictCursor, self).execute(query, args)
if self.description:
self._fields = [ field[0] for field in self.description ]
return result
def fetchone(self):
''' Fetch the next row '''
self._check_executed()
if self._rows is None or self.rownumber >= len(self._rows):
return None
result = dict(zip(self._fields, self._rows[self.rownumber]))
self.rownumber += 1
return result
def fetchmany(self, size=None):
''' Fetch several rows '''
self._check_executed()
if self._rows is None:
return None
end = self.rownumber + (size or self.arraysize)
result = [ dict(zip(self._fields, r)) for r in self._rows[self.rownumber:end] ]
self.rownumber = min(end, len(self._rows))
return tuple(result)
def fetchall(self):
''' Fetch all the rows '''
self._check_executed()
if self._rows is None:
return None
if self.rownumber:
result = [ dict(zip(self._fields, r)) for r in self._rows[self.rownumber:] ]
else:
result = [ dict(zip(self._fields, r)) for r in self._rows ]
self.rownumber = len(self._rows)
return tuple(result)
class SSCursor(Cursor):
"""
Unbuffered Cursor, mainly useful for queries that return a lot of data,
or for connections to remote servers over a slow network.
Instead of copying every row of data into a buffer, this will fetch
rows as needed. The upside of this, is the client uses much less memory,
and rows are returned much faster when traveling over a slow network,
or if the result set is very big.
There are limitations, though. The MySQL protocol doesn't support
returning the total number of rows, so the only way to tell how many rows
there are is to iterate over every row returned. Also, it currently isn't
possible to scroll backwards, as only the current row is held in memory.
"""
def close(self):
conn = self._get_db()
conn._result._finish_unbuffered_query()
try:
if self._has_next:
while self.nextset(): pass
except: pass
def _query(self, q):
conn = self._get_db()
self._last_executed = q
conn.query(q, unbuffered=True)
self._do_get_result()
return self.rowcount
def read_next(self):
""" Read next row """
conn = self._get_db()
conn._result._read_rowdata_packet_unbuffered()
return conn._result.rows
def fetchone(self):
""" Fetch next row """
self._check_executed()
row = self.read_next()
if row is None:
return None
self.rownumber += 1
return row
def fetchall(self):
"""
Fetch all, as per MySQLdb. Pretty useless for large queries, as
it is buffered. See fetchall_unbuffered(), if you want an unbuffered
generator version of this method.
"""
rows = []
while True:
row = self.fetchone()
if row is None:
break
rows.append(row)
return tuple(rows)
def fetchall_unbuffered(self):
"""
Fetch all, implemented as a generator, which isn't to standard,
however, it doesn't make sense to return everything in a list, as that
would use ridiculous memory for large result sets.
"""
row = self.fetchone()
while row is not None:
yield row
row = self.fetchone()
def fetchmany(self, size=None):
""" Fetch many """
self._check_executed()
if size is None:
size = self.arraysize
rows = []
for i in range(0, size):
row = self.read_next()
if row is None:
break
rows.append(row)
self.rownumber += 1
return tuple(rows)
    def scroll(self, value, mode='relative'):
        self._check_executed()
        if mode not in ('relative', 'absolute'):
            self.errorhandler(self, ProgrammingError,
                              "unknown scroll mode %s" % mode)
if mode == 'relative':
if value < 0:
self.errorhandler(self, NotSupportedError,
"Backwards scrolling not supported by this cursor")
for i in range(0, value): self.read_next()
self.rownumber += value
else:
if value < self.rownumber:
self.errorhandler(self, NotSupportedError,
"Backwards scrolling not supported by this cursor")
end = value - self.rownumber
for i in range(0, end): self.read_next()
self.rownumber = value
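# A minimal sketch of streaming a large result set with SSCursor, as
# described in the class docstring -- `conn`, `big_table`, and
# `process()` are hypothetical:
#
#   cur = conn.cursor(SSCursor)
#   cur.execute('SELECT * FROM big_table')
#   for row in cur.fetchall_unbuffered():
#       process(row)                  # rows are fetched one at a time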
# ===== next file: converters.py (Python) =====
import re
import datetime
import time
import sys
from constants import FIELD_TYPE, FLAG
from charset import charset_by_id
PYTHON3 = sys.version_info[0] > 2
if PYTHON3:
    # Python 3 removed these py2 builtins; alias them so the
    # isinstance() checks and the encoders table below still work.
    unicode = str
    long = int
try:
set
except NameError:
try:
from sets import BaseSet as set
except ImportError:
from sets import Set as set
ESCAPE_REGEX = re.compile(r"[\0\n\r\032\'\"\\]")
ESCAPE_MAP = {'\0': '\\0', '\n': '\\n', '\r': '\\r', '\032': '\\Z',
'\'': '\\\'', '"': '\\"', '\\': '\\\\'}
def escape_item(val, charset):
if type(val) in [tuple, list, set]:
return escape_sequence(val, charset)
if type(val) is dict:
return escape_dict(val, charset)
if PYTHON3 and hasattr(val, "decode") and not isinstance(val, unicode):
# deal with py3k bytes
val = val.decode(charset)
encoder = encoders[type(val)]
val = encoder(val)
if type(val) in [str, int]:
return val
val = val.encode(charset)
return val
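# Illustrative dispatch through the `encoders` table defined below
# (the charset name is an assumption; results shown for Python 2 strings):
#
#   escape_item("it's", 'latin1')   # -> "'it\'s'"
#   escape_item(None, 'latin1')     # -> 'NULL'
#   escape_item([1, 2], 'latin1')   # -> '(1,2)'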
def escape_dict(val, charset):
n = {}
for k, v in val.items():
quoted = escape_item(v, charset)
n[k] = quoted
return n
def escape_sequence(val, charset):
    n = []
    for item in val:
        quoted = escape_item(item, charset)
        # escape_item() passes ints through unchanged; stringify them
        # so they can be joined with the other escaped values
        if not isinstance(quoted, str):
            quoted = str(quoted)
        n.append(quoted)
    return "(" + ",".join(n) + ")"
def escape_set(val, charset):
val = map(lambda x: escape_item(x, charset), val)
return ','.join(val)
def escape_bool(value):
return str(int(value))
def escape_object(value):
return str(value)
def escape_int(value):
return value
escape_long = escape_object
def escape_float(value):
return ('%.15g' % value)
def escape_string(value):
return ("'%s'" % ESCAPE_REGEX.sub(
lambda match: ESCAPE_MAP.get(match.group(0)), value))
def escape_unicode(value):
return escape_string(value)
def escape_None(value):
return 'NULL'
def escape_timedelta(obj):
seconds = int(obj.seconds) % 60
minutes = int(obj.seconds // 60) % 60
hours = int(obj.seconds // 3600) % 24 + int(obj.days) * 24
return escape_string('%02d:%02d:%02d' % (hours, minutes, seconds))
def escape_time(obj):
s = "%02d:%02d:%02d" % (int(obj.hour), int(obj.minute),
int(obj.second))
if obj.microsecond:
s += ".%f" % obj.microsecond
return escape_string(s)
def escape_datetime(obj):
return escape_string(obj.strftime("%Y-%m-%d %H:%M:%S"))
def escape_date(obj):
return escape_string(obj.strftime("%Y-%m-%d"))
def escape_struct_time(obj):
return escape_datetime(datetime.datetime(*obj[:6]))
def convert_datetime(connection, field, obj):
"""Returns a DATETIME or TIMESTAMP column value as a datetime object:
>>> datetime_or_None('2007-02-25 23:06:20')
datetime.datetime(2007, 2, 25, 23, 6, 20)
>>> datetime_or_None('2007-02-25T23:06:20')
datetime.datetime(2007, 2, 25, 23, 6, 20)
Illegal values are returned as None:
>>> datetime_or_None('2007-02-31T23:06:20') is None
True
>>> datetime_or_None('0000-00-00 00:00:00') is None
True
"""
if not isinstance(obj, unicode):
obj = obj.decode(connection.charset)
if ' ' in obj:
sep = ' '
elif 'T' in obj:
sep = 'T'
else:
return convert_date(connection, field, obj)
try:
ymd, hms = obj.split(sep, 1)
return datetime.datetime(*[ int(x) for x in ymd.split('-')+hms.split(':') ])
except ValueError:
return convert_date(connection, field, obj)
def convert_timedelta(connection, field, obj):
"""Returns a TIME column as a timedelta object:
>>> timedelta_or_None('25:06:17')
datetime.timedelta(1, 3977)
>>> timedelta_or_None('-25:06:17')
datetime.timedelta(-2, 83177)
Illegal values are returned as None:
>>> timedelta_or_None('random crap') is None
True
Note that MySQL always returns TIME columns as (+|-)HH:MM:SS, but
can accept values as (+|-)DD HH:MM:SS. The latter format will not
be parsed correctly by this function.
"""
try:
microseconds = 0
if not isinstance(obj, unicode):
obj = obj.decode(connection.charset)
if "." in obj:
(obj, tail) = obj.split('.')
microseconds = int(tail)
hours, minutes, seconds = obj.split(':')
tdelta = datetime.timedelta(
hours = int(hours),
minutes = int(minutes),
seconds = int(seconds),
microseconds = microseconds
)
return tdelta
except ValueError:
return None
def convert_time(connection, field, obj):
"""Returns a TIME column as a time object:
>>> time_or_None('15:06:17')
datetime.time(15, 6, 17)
Illegal values are returned as None:
>>> time_or_None('-25:06:17') is None
True
>>> time_or_None('random crap') is None
True
Note that MySQL always returns TIME columns as (+|-)HH:MM:SS, but
can accept values as (+|-)DD HH:MM:SS. The latter format will not
be parsed correctly by this function.
Also note that MySQL's TIME column corresponds more closely to
Python's timedelta and not time. However if you want TIME columns
to be treated as time-of-day and not a time offset, then you can
use set this function as the converter for FIELD_TYPE.TIME.
"""
try:
microseconds = 0
if "." in obj:
(obj, tail) = obj.split('.')
microseconds = int(tail)
hours, minutes, seconds = obj.split(':')
return datetime.time(hour=int(hours), minute=int(minutes),
second=int(seconds), microsecond=microseconds)
except ValueError:
return None
def convert_date(connection, field, obj):
"""Returns a DATE column as a date object:
>>> date_or_None('2007-02-26')
datetime.date(2007, 2, 26)
Illegal values are returned as None:
>>> date_or_None('2007-02-31') is None
True
>>> date_or_None('0000-00-00') is None
True
"""
try:
if not isinstance(obj, unicode):
obj = obj.decode(connection.charset)
return datetime.date(*[ int(x) for x in obj.split('-', 2) ])
except ValueError:
return None
def convert_mysql_timestamp(connection, field, timestamp):
"""Convert a MySQL TIMESTAMP to a Timestamp object.
MySQL >= 4.1 returns TIMESTAMP in the same format as DATETIME:
>>> mysql_timestamp_converter('2007-02-25 22:32:17')
datetime.datetime(2007, 2, 25, 22, 32, 17)
MySQL < 4.1 uses a big string of numbers:
>>> mysql_timestamp_converter('20070225223217')
datetime.datetime(2007, 2, 25, 22, 32, 17)
Illegal values are returned as None:
>>> mysql_timestamp_converter('2007-02-31 22:32:17') is None
True
>>> mysql_timestamp_converter('00000000000000') is None
True
"""
if not isinstance(timestamp, unicode):
timestamp = timestamp.decode(connection.charset)
if timestamp[4] == '-':
return convert_datetime(connection, field, timestamp)
timestamp += "0"*(14-len(timestamp)) # padding
year, month, day, hour, minute, second = \
int(timestamp[:4]), int(timestamp[4:6]), int(timestamp[6:8]), \
int(timestamp[8:10]), int(timestamp[10:12]), int(timestamp[12:14])
try:
return datetime.datetime(year, month, day, hour, minute, second)
except ValueError:
return None
def convert_set(s):
return set(s.split(","))
def convert_bit(connection, field, b):
#b = "\x00" * (8 - len(b)) + b # pad w/ zeroes
#return struct.unpack(">Q", b)[0]
#
# the snippet above is right, but MySQLdb doesn't process bits,
# so we shouldn't either
return b
def convert_characters(connection, field, data):
field_charset = charset_by_id(field.charsetnr).name
if field.flags & FLAG.SET:
return convert_set(data.decode(field_charset))
if field.flags & FLAG.BINARY:
return data
if connection.use_unicode:
data = data.decode(field_charset)
elif connection.charset != field_charset:
data = data.decode(field_charset)
data = data.encode(connection.charset)
return data
def convert_int(connection, field, data):
return int(data)
def convert_long(connection, field, data):
return long(data)
def convert_float(connection, field, data):
return float(data)
encoders = {
bool: escape_bool,
int: escape_int,
long: escape_long,
float: escape_float,
str: escape_string,
unicode: escape_unicode,
tuple: escape_sequence,
list:escape_sequence,
set:escape_sequence,
dict:escape_dict,
type(None):escape_None,
datetime.date: escape_date,
datetime.datetime : escape_datetime,
datetime.timedelta : escape_timedelta,
datetime.time : escape_time,
time.struct_time : escape_struct_time,
}
decoders = {
FIELD_TYPE.BIT: convert_bit,
FIELD_TYPE.TINY: convert_int,
FIELD_TYPE.SHORT: convert_int,
FIELD_TYPE.LONG: convert_long,
FIELD_TYPE.FLOAT: convert_float,
FIELD_TYPE.DOUBLE: convert_float,
FIELD_TYPE.DECIMAL: convert_float,
FIELD_TYPE.NEWDECIMAL: convert_float,
FIELD_TYPE.LONGLONG: convert_long,
FIELD_TYPE.INT24: convert_int,
FIELD_TYPE.YEAR: convert_int,
FIELD_TYPE.TIMESTAMP: convert_mysql_timestamp,
FIELD_TYPE.DATETIME: convert_datetime,
FIELD_TYPE.TIME: convert_timedelta,
FIELD_TYPE.DATE: convert_date,
    # convert_set() takes a single string; adapt it to the
    # (connection, field, data) signature used for all decoders
    FIELD_TYPE.SET: lambda connection, field, data: convert_set(data),
FIELD_TYPE.BLOB: convert_characters,
FIELD_TYPE.TINY_BLOB: convert_characters,
FIELD_TYPE.MEDIUM_BLOB: convert_characters,
FIELD_TYPE.LONG_BLOB: convert_characters,
FIELD_TYPE.STRING: convert_characters,
FIELD_TYPE.VAR_STRING: convert_characters,
FIELD_TYPE.VARCHAR: convert_characters,
#FIELD_TYPE.BLOB: str,
#FIELD_TYPE.STRING: str,
#FIELD_TYPE.VAR_STRING: str,
#FIELD_TYPE.VARCHAR: str
}
conversions = decoders # for MySQLdb compatibility
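# A sketch of swapping in a custom decoder via the `conv` argument of
# connect() (see connections.Connection); `my_conv` and the connection
# parameters are hypothetical. Here DATE columns stay raw strings:
#
#   import pymysql
#   from pymysql.constants import FIELD_TYPE
#   my_conv = dict(decoders)
#   my_conv[FIELD_TYPE.DATE] = lambda conn, field, data: data
#   conn = pymysql.connect(host='localhost', user='root', conv=my_conv)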
try:
# python version > 2.3
from decimal import Decimal
def convert_decimal(connection, field, data):
data = data.decode(connection.charset)
return Decimal(data)
decoders[FIELD_TYPE.DECIMAL] = convert_decimal
decoders[FIELD_TYPE.NEWDECIMAL] = convert_decimal
def escape_decimal(obj):
return unicode(obj)
encoders[Decimal] = escape_decimal
except ImportError:
pass
# ===== next file: times.py (Python) =====
from time import localtime
from datetime import date, datetime, time, timedelta
Date = date
Time = time
TimeDelta = timedelta
Timestamp = datetime
def DateFromTicks(ticks):
return date(*localtime(ticks)[:3])
def TimeFromTicks(ticks):
return time(*localtime(ticks)[3:6])
def TimestampFromTicks(ticks):
return datetime(*localtime(ticks)[:6])
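# Illustrative use of the ticks helpers above; note that `time` in this
# module is datetime.time, so go through these helpers rather than the
# stdlib time module directly:
#
#   DateFromTicks(0)        # -> date of the epoch in the local timezone
#   TimestampFromTicks(0)   # -> local datetime at the epoch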
# ===== next file: err.py (Python) =====
import struct
try:
StandardError, Warning
except ImportError:
try:
from exceptions import StandardError, Warning
except ImportError:
import sys
e = sys.modules['exceptions']
StandardError = e.StandardError
Warning = e.Warning
from constants import ER
import sys
class MySQLError(StandardError):
"""Exception related to operation with MySQL."""
class Warning(Warning, MySQLError):
"""Exception raised for important warnings like data truncations
while inserting, etc."""
class Error(MySQLError):
"""Exception that is the base class of all other error exceptions
(not Warning)."""
class InterfaceError(Error):
"""Exception raised for errors that are related to the database
interface rather than the database itself."""
class DatabaseError(Error):
"""Exception raised for errors that are related to the
database."""
class DataError(DatabaseError):
"""Exception raised for errors that are due to problems with the
processed data like division by zero, numeric value out of range,
etc."""
class OperationalError(DatabaseError):
"""Exception raised for errors that are related to the database's
operation and not necessarily under the control of the programmer,
e.g. an unexpected disconnect occurs, the data source name is not
found, a transaction could not be processed, a memory allocation
error occurred during processing, etc."""
class IntegrityError(DatabaseError):
"""Exception raised when the relational integrity of the database
is affected, e.g. a foreign key check fails, duplicate key,
etc."""
class InternalError(DatabaseError):
"""Exception raised when the database encounters an internal
error, e.g. the cursor is not valid anymore, the transaction is
out of sync, etc."""
class ProgrammingError(DatabaseError):
"""Exception raised for programming errors, e.g. table not found
or already exists, syntax error in the SQL statement, wrong number
of parameters specified, etc."""
class NotSupportedError(DatabaseError):
"""Exception raised in case a method or database API was used
which is not supported by the database, e.g. requesting a
.rollback() on a connection that does not support transaction or
has transactions turned off."""
error_map = {}
def _map_error(exc, *errors):
for error in errors:
error_map[error] = exc
_map_error(ProgrammingError, ER.DB_CREATE_EXISTS, ER.SYNTAX_ERROR,
ER.PARSE_ERROR, ER.NO_SUCH_TABLE, ER.WRONG_DB_NAME,
ER.WRONG_TABLE_NAME, ER.FIELD_SPECIFIED_TWICE,
ER.INVALID_GROUP_FUNC_USE, ER.UNSUPPORTED_EXTENSION,
ER.TABLE_MUST_HAVE_COLUMNS, ER.CANT_DO_THIS_DURING_AN_TRANSACTION)
_map_error(DataError, ER.WARN_DATA_TRUNCATED, ER.WARN_NULL_TO_NOTNULL,
ER.WARN_DATA_OUT_OF_RANGE, ER.NO_DEFAULT, ER.PRIMARY_CANT_HAVE_NULL,
ER.DATA_TOO_LONG, ER.DATETIME_FUNCTION_OVERFLOW)
_map_error(IntegrityError, ER.DUP_ENTRY, ER.NO_REFERENCED_ROW,
ER.NO_REFERENCED_ROW_2, ER.ROW_IS_REFERENCED, ER.ROW_IS_REFERENCED_2,
ER.CANNOT_ADD_FOREIGN)
_map_error(NotSupportedError, ER.WARNING_NOT_COMPLETE_ROLLBACK,
ER.NOT_SUPPORTED_YET, ER.FEATURE_DISABLED, ER.UNKNOWN_STORAGE_ENGINE)
_map_error(OperationalError, ER.DBACCESS_DENIED_ERROR, ER.ACCESS_DENIED_ERROR,
ER.TABLEACCESS_DENIED_ERROR, ER.COLUMNACCESS_DENIED_ERROR)
del _map_error, ER
def _get_error_info(data):
errno = struct.unpack('<h', data[1:3])[0]
if sys.version_info[0] == 3:
is_41 = data[3] == ord("#")
else:
is_41 = data[3] == "#"
if is_41:
# version 4.1
sqlstate = data[4:9].decode("utf8")
errorvalue = data[9:].decode("utf8")
return (errno, sqlstate, errorvalue)
else:
# version 4.0
return (errno, None, data[3:].decode("utf8"))
def _check_mysql_exception(errinfo):
errno, sqlstate, errorvalue = errinfo
errorclass = error_map.get(errno, None)
if errorclass:
raise errorclass, (errno,errorvalue)
# couldn't find the right error number
raise InternalError, (errno, errorvalue)
def raise_mysql_exception(data):
errinfo = _get_error_info(data)
_check_mysql_exception(errinfo)
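# Illustrative: because ER.DUP_ENTRY is mapped above, a duplicate-key
# error packet (errno 1062; the message text is hypothetical) makes
# raise_mysql_exception() do, in effect:
#
#   raise IntegrityError, (1062, "Duplicate entry '1' for key 'PRIMARY'")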
# ===== next file: util.py (Python) =====
import struct
def byte2int(b):
if isinstance(b, int):
return b
else:
return struct.unpack("!B", b)[0]
def int2byte(i):
return struct.pack("!B", i)
def join_bytes(bs):
if len(bs) == 0:
return ""
else:
rv = bs[0]
for b in bs[1:]:
rv += b
return rv
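# Illustrative round trips (byte strings, as on Python 2):
#
#   int2byte(65)             # -> 'A'
#   byte2int('A')            # -> 65
#   join_bytes(['ab', 'c'])  # -> 'abc'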
# ===== next file: charset.py (Python) =====
MBLENGTH = {
8:1,
33:3,
88:2,
91:2
}
class Charset:
def __init__(self, id, name, collation, is_default):
self.id, self.name, self.collation = id, name, collation
self.is_default = is_default == 'Yes'
class Charsets:
def __init__(self):
self._by_id = {}
def add(self, c):
self._by_id[c.id] = c
def by_id(self, id):
return self._by_id[id]
def by_name(self, name):
for c in self._by_id.values():
if c.name == name and c.is_default:
return c
_charsets = Charsets()
"""
Generated with:
mysql -N -s -e "select id, character_set_name, collation_name, is_default
from information_schema.collations order by id;" | python -c "import sys
for l in sys.stdin.readlines():
id, name, collation, is_default = l.split(chr(9))
print '_charsets.add(Charset(%s, \'%s\', \'%s\', \'%s\'))' \
% (id, name, collation, is_default.strip())
"
"""
_charsets.add(Charset(1, 'big5', 'big5_chinese_ci', 'Yes'))
_charsets.add(Charset(2, 'latin2', 'latin2_czech_cs', ''))
_charsets.add(Charset(3, 'dec8', 'dec8_swedish_ci', 'Yes'))
_charsets.add(Charset(4, 'cp850', 'cp850_general_ci', 'Yes'))
_charsets.add(Charset(5, 'latin1', 'latin1_german1_ci', ''))
_charsets.add(Charset(6, 'hp8', 'hp8_english_ci', 'Yes'))
_charsets.add(Charset(7, 'koi8r', 'koi8r_general_ci', 'Yes'))
_charsets.add(Charset(8, 'latin1', 'latin1_swedish_ci', 'Yes'))
_charsets.add(Charset(9, 'latin2', 'latin2_general_ci', 'Yes'))
_charsets.add(Charset(10, 'swe7', 'swe7_swedish_ci', 'Yes'))
_charsets.add(Charset(11, 'ascii', 'ascii_general_ci', 'Yes'))
_charsets.add(Charset(12, 'ujis', 'ujis_japanese_ci', 'Yes'))
_charsets.add(Charset(13, 'sjis', 'sjis_japanese_ci', 'Yes'))
_charsets.add(Charset(14, 'cp1251', 'cp1251_bulgarian_ci', ''))
_charsets.add(Charset(15, 'latin1', 'latin1_danish_ci', ''))
_charsets.add(Charset(16, 'hebrew', 'hebrew_general_ci', 'Yes'))
_charsets.add(Charset(18, 'tis620', 'tis620_thai_ci', 'Yes'))
_charsets.add(Charset(19, 'euckr', 'euckr_korean_ci', 'Yes'))
_charsets.add(Charset(20, 'latin7', 'latin7_estonian_cs', ''))
_charsets.add(Charset(21, 'latin2', 'latin2_hungarian_ci', ''))
_charsets.add(Charset(22, 'koi8u', 'koi8u_general_ci', 'Yes'))
_charsets.add(Charset(23, 'cp1251', 'cp1251_ukrainian_ci', ''))
_charsets.add(Charset(24, 'gb2312', 'gb2312_chinese_ci', 'Yes'))
_charsets.add(Charset(25, 'greek', 'greek_general_ci', 'Yes'))
_charsets.add(Charset(26, 'cp1250', 'cp1250_general_ci', 'Yes'))
_charsets.add(Charset(27, 'latin2', 'latin2_croatian_ci', ''))
_charsets.add(Charset(28, 'gbk', 'gbk_chinese_ci', 'Yes'))
_charsets.add(Charset(29, 'cp1257', 'cp1257_lithuanian_ci', ''))
_charsets.add(Charset(30, 'latin5', 'latin5_turkish_ci', 'Yes'))
_charsets.add(Charset(31, 'latin1', 'latin1_german2_ci', ''))
_charsets.add(Charset(32, 'armscii8', 'armscii8_general_ci', 'Yes'))
_charsets.add(Charset(33, 'utf8', 'utf8_general_ci', 'Yes'))
_charsets.add(Charset(34, 'cp1250', 'cp1250_czech_cs', ''))
_charsets.add(Charset(35, 'ucs2', 'ucs2_general_ci', 'Yes'))
_charsets.add(Charset(36, 'cp866', 'cp866_general_ci', 'Yes'))
_charsets.add(Charset(37, 'keybcs2', 'keybcs2_general_ci', 'Yes'))
_charsets.add(Charset(38, 'macce', 'macce_general_ci', 'Yes'))
_charsets.add(Charset(39, 'macroman', 'macroman_general_ci', 'Yes'))
_charsets.add(Charset(40, 'cp852', 'cp852_general_ci', 'Yes'))
_charsets.add(Charset(41, 'latin7', 'latin7_general_ci', 'Yes'))
_charsets.add(Charset(42, 'latin7', 'latin7_general_cs', ''))
_charsets.add(Charset(43, 'macce', 'macce_bin', ''))
_charsets.add(Charset(44, 'cp1250', 'cp1250_croatian_ci', ''))
_charsets.add(Charset(47, 'latin1', 'latin1_bin', ''))
_charsets.add(Charset(48, 'latin1', 'latin1_general_ci', ''))
_charsets.add(Charset(49, 'latin1', 'latin1_general_cs', ''))
_charsets.add(Charset(50, 'cp1251', 'cp1251_bin', ''))
_charsets.add(Charset(51, 'cp1251', 'cp1251_general_ci', 'Yes'))
_charsets.add(Charset(52, 'cp1251', 'cp1251_general_cs', ''))
_charsets.add(Charset(53, 'macroman', 'macroman_bin', ''))
_charsets.add(Charset(57, 'cp1256', 'cp1256_general_ci', 'Yes'))
_charsets.add(Charset(58, 'cp1257', 'cp1257_bin', ''))
_charsets.add(Charset(59, 'cp1257', 'cp1257_general_ci', 'Yes'))
_charsets.add(Charset(63, 'binary', 'binary', 'Yes'))
_charsets.add(Charset(64, 'armscii8', 'armscii8_bin', ''))
_charsets.add(Charset(65, 'ascii', 'ascii_bin', ''))
_charsets.add(Charset(66, 'cp1250', 'cp1250_bin', ''))
_charsets.add(Charset(67, 'cp1256', 'cp1256_bin', ''))
_charsets.add(Charset(68, 'cp866', 'cp866_bin', ''))
_charsets.add(Charset(69, 'dec8', 'dec8_bin', ''))
_charsets.add(Charset(70, 'greek', 'greek_bin', ''))
_charsets.add(Charset(71, 'hebrew', 'hebrew_bin', ''))
_charsets.add(Charset(72, 'hp8', 'hp8_bin', ''))
_charsets.add(Charset(73, 'keybcs2', 'keybcs2_bin', ''))
_charsets.add(Charset(74, 'koi8r', 'koi8r_bin', ''))
_charsets.add(Charset(75, 'koi8u', 'koi8u_bin', ''))
_charsets.add(Charset(77, 'latin2', 'latin2_bin', ''))
_charsets.add(Charset(78, 'latin5', 'latin5_bin', ''))
_charsets.add(Charset(79, 'latin7', 'latin7_bin', ''))
_charsets.add(Charset(80, 'cp850', 'cp850_bin', ''))
_charsets.add(Charset(81, 'cp852', 'cp852_bin', ''))
_charsets.add(Charset(82, 'swe7', 'swe7_bin', ''))
_charsets.add(Charset(83, 'utf8', 'utf8_bin', ''))
_charsets.add(Charset(84, 'big5', 'big5_bin', ''))
_charsets.add(Charset(85, 'euckr', 'euckr_bin', ''))
_charsets.add(Charset(86, 'gb2312', 'gb2312_bin', ''))
_charsets.add(Charset(87, 'gbk', 'gbk_bin', ''))
_charsets.add(Charset(88, 'sjis', 'sjis_bin', ''))
_charsets.add(Charset(89, 'tis620', 'tis620_bin', ''))
_charsets.add(Charset(90, 'ucs2', 'ucs2_bin', ''))
_charsets.add(Charset(91, 'ujis', 'ujis_bin', ''))
_charsets.add(Charset(92, 'geostd8', 'geostd8_general_ci', 'Yes'))
_charsets.add(Charset(93, 'geostd8', 'geostd8_bin', ''))
_charsets.add(Charset(94, 'latin1', 'latin1_spanish_ci', ''))
_charsets.add(Charset(95, 'cp932', 'cp932_japanese_ci', 'Yes'))
_charsets.add(Charset(96, 'cp932', 'cp932_bin', ''))
_charsets.add(Charset(97, 'eucjpms', 'eucjpms_japanese_ci', 'Yes'))
_charsets.add(Charset(98, 'eucjpms', 'eucjpms_bin', ''))
_charsets.add(Charset(99, 'cp1250', 'cp1250_polish_ci', ''))
_charsets.add(Charset(128, 'ucs2', 'ucs2_unicode_ci', ''))
_charsets.add(Charset(129, 'ucs2', 'ucs2_icelandic_ci', ''))
_charsets.add(Charset(130, 'ucs2', 'ucs2_latvian_ci', ''))
_charsets.add(Charset(131, 'ucs2', 'ucs2_romanian_ci', ''))
_charsets.add(Charset(132, 'ucs2', 'ucs2_slovenian_ci', ''))
_charsets.add(Charset(133, 'ucs2', 'ucs2_polish_ci', ''))
_charsets.add(Charset(134, 'ucs2', 'ucs2_estonian_ci', ''))
_charsets.add(Charset(135, 'ucs2', 'ucs2_spanish_ci', ''))
_charsets.add(Charset(136, 'ucs2', 'ucs2_swedish_ci', ''))
_charsets.add(Charset(137, 'ucs2', 'ucs2_turkish_ci', ''))
_charsets.add(Charset(138, 'ucs2', 'ucs2_czech_ci', ''))
_charsets.add(Charset(139, 'ucs2', 'ucs2_danish_ci', ''))
_charsets.add(Charset(140, 'ucs2', 'ucs2_lithuanian_ci', ''))
_charsets.add(Charset(141, 'ucs2', 'ucs2_slovak_ci', ''))
_charsets.add(Charset(142, 'ucs2', 'ucs2_spanish2_ci', ''))
_charsets.add(Charset(143, 'ucs2', 'ucs2_roman_ci', ''))
_charsets.add(Charset(144, 'ucs2', 'ucs2_persian_ci', ''))
_charsets.add(Charset(145, 'ucs2', 'ucs2_esperanto_ci', ''))
_charsets.add(Charset(146, 'ucs2', 'ucs2_hungarian_ci', ''))
_charsets.add(Charset(192, 'utf8', 'utf8_unicode_ci', ''))
_charsets.add(Charset(193, 'utf8', 'utf8_icelandic_ci', ''))
_charsets.add(Charset(194, 'utf8', 'utf8_latvian_ci', ''))
_charsets.add(Charset(195, 'utf8', 'utf8_romanian_ci', ''))
_charsets.add(Charset(196, 'utf8', 'utf8_slovenian_ci', ''))
_charsets.add(Charset(197, 'utf8', 'utf8_polish_ci', ''))
_charsets.add(Charset(198, 'utf8', 'utf8_estonian_ci', ''))
_charsets.add(Charset(199, 'utf8', 'utf8_spanish_ci', ''))
_charsets.add(Charset(200, 'utf8', 'utf8_swedish_ci', ''))
_charsets.add(Charset(201, 'utf8', 'utf8_turkish_ci', ''))
_charsets.add(Charset(202, 'utf8', 'utf8_czech_ci', ''))
_charsets.add(Charset(203, 'utf8', 'utf8_danish_ci', ''))
_charsets.add(Charset(204, 'utf8', 'utf8_lithuanian_ci', ''))
_charsets.add(Charset(205, 'utf8', 'utf8_slovak_ci', ''))
_charsets.add(Charset(206, 'utf8', 'utf8_spanish2_ci', ''))
_charsets.add(Charset(207, 'utf8', 'utf8_roman_ci', ''))
_charsets.add(Charset(208, 'utf8', 'utf8_persian_ci', ''))
_charsets.add(Charset(209, 'utf8', 'utf8_esperanto_ci', ''))
_charsets.add(Charset(210, 'utf8', 'utf8_hungarian_ci', ''))
def charset_by_name(name):
return _charsets.by_name(name)
def charset_by_id(id):
return _charsets.by_id(id)
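# Illustrative lookups against the table above:
#
#   charset_by_id(33).name               # -> 'utf8'
#   charset_by_name('utf8').id           # -> 33 (the default utf8 collation)
#   charset_by_name('latin1').collation  # -> 'latin1_swedish_ci'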
# ===== next file: tests/base.py (Python) =====
import pymysql
import unittest
class PyMySQLTestCase(unittest.TestCase):
# Edit this to suit your test environment.
databases = [
{"host":"localhost","user":"root",
"passwd":"","db":"test_pymysql", "use_unicode": True},
{"host":"localhost","user":"root","passwd":"","db":"test_pymysql2"}]
def setUp(self):
self.connections = []
for params in self.databases:
self.connections.append(pymysql.connect(**params))
def tearDown(self):
for connection in self.connections:
connection.close()
# ===== next file: tests/__init__.py (Python) =====
from pymysql.tests.test_issues import *
from pymysql.tests.test_example import *
from pymysql.tests.test_basic import *
from pymysql.tests.test_DictCursor import *
import sys
if sys.version_info[0] == 2:
# MySQLdb tests were designed for Python 3
from pymysql.tests.thirdparty import *
if __name__ == "__main__":
import unittest
unittest.main()
# ===== next file: pymysql/__init__.py (Python) =====
'''
PyMySQL: A pure-Python drop-in replacement for MySQLdb.
Copyright (c) 2010 PyMySQL contributors
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
VERSION = (0, 5, None)
from constants import FIELD_TYPE
from converters import escape_dict, escape_sequence, escape_string
from err import Warning, Error, InterfaceError, DataError, \
DatabaseError, OperationalError, IntegrityError, InternalError, \
NotSupportedError, ProgrammingError, MySQLError
from times import Date, Time, Timestamp, \
DateFromTicks, TimeFromTicks, TimestampFromTicks
import sys
try:
frozenset
except NameError:
from sets import ImmutableSet as frozenset
try:
from sets import BaseSet as set
except ImportError:
from sets import Set as set
threadsafety = 1
apilevel = "2.0"
paramstyle = "format"
class DBAPISet(frozenset):
def __ne__(self, other):
if isinstance(other, set):
            return super(DBAPISet, self).__ne__(other)
else:
return other not in self
def __eq__(self, other):
if isinstance(other, frozenset):
return frozenset.__eq__(self, other)
else:
return other in self
def __hash__(self):
return frozenset.__hash__(self)
STRING = DBAPISet([FIELD_TYPE.ENUM, FIELD_TYPE.STRING,
FIELD_TYPE.VAR_STRING])
BINARY = DBAPISet([FIELD_TYPE.BLOB, FIELD_TYPE.LONG_BLOB,
FIELD_TYPE.MEDIUM_BLOB, FIELD_TYPE.TINY_BLOB])
NUMBER = DBAPISet([FIELD_TYPE.DECIMAL, FIELD_TYPE.DOUBLE, FIELD_TYPE.FLOAT,
FIELD_TYPE.INT24, FIELD_TYPE.LONG, FIELD_TYPE.LONGLONG,
FIELD_TYPE.TINY, FIELD_TYPE.YEAR])
DATE = DBAPISet([FIELD_TYPE.DATE, FIELD_TYPE.NEWDATE])
TIME = DBAPISet([FIELD_TYPE.TIME])
TIMESTAMP = DBAPISet([FIELD_TYPE.TIMESTAMP, FIELD_TYPE.DATETIME])
DATETIME = TIMESTAMP
ROWID = DBAPISet()
def Binary(x):
"""Return x as a binary type."""
return str(x)
def Connect(*args, **kwargs):
"""
Connect to the database; see connections.Connection.__init__() for
more information.
"""
from connections import Connection
return Connection(*args, **kwargs)
def get_client_info(): # for MySQLdb compatibility
return '%s.%s.%s' % VERSION
connect = Connection = Connect
# we include a doctored version_info here for MySQLdb compatibility
version_info = (1,2,2,"final",0)
NULL = "NULL"
__version__ = get_client_info()
def thread_safe():
return True # match MySQLdb.thread_safe()
def install_as_MySQLdb():
"""
After this function is called, any application that imports MySQLdb or
_mysql will unwittingly actually use
"""
sys.modules["MySQLdb"] = sys.modules["_mysql"] = sys.modules["pymysql"]
__all__ = [
'BINARY', 'Binary', 'Connect', 'Connection', 'DATE', 'Date',
'Time', 'Timestamp', 'DateFromTicks', 'TimeFromTicks', 'TimestampFromTicks',
'DataError', 'DatabaseError', 'Error', 'FIELD_TYPE', 'IntegrityError',
'InterfaceError', 'InternalError', 'MySQLError', 'NULL', 'NUMBER',
'NotSupportedError', 'DBAPISet', 'OperationalError', 'ProgrammingError',
'ROWID', 'STRING', 'TIME', 'TIMESTAMP', 'Warning', 'apilevel', 'connect',
'connections', 'constants', 'converters', 'cursors',
'escape_dict', 'escape_sequence', 'escape_string', 'get_client_info',
'paramstyle', 'threadsafety', 'version_info',
"install_as_MySQLdb",
"NULL","__version__",
]
# ===== next file: connections.py (Python) =====
# Python implementation of the MySQL client-server protocol
# http://forge.mysql.com/wiki/MySQL_Internals_ClientServer_Protocol
try:
import hashlib
sha_new = lambda *args, **kwargs: hashlib.new("sha1", *args, **kwargs)
except ImportError:
import sha
sha_new = sha.new
import socket
try:
import ssl
SSL_ENABLED = True
except ImportError:
SSL_ENABLED = False
import struct
import sys
import os
import ConfigParser
try:
import cStringIO as StringIO
except ImportError:
import StringIO
try:
import getpass
DEFAULT_USER = getpass.getuser()
except ImportError:
DEFAULT_USER = None
from charset import MBLENGTH, charset_by_name, charset_by_id
from cursors import Cursor
from constants import FIELD_TYPE, FLAG
from constants import SERVER_STATUS
from constants.CLIENT import *
from constants.COMMAND import *
from util import join_bytes, byte2int, int2byte
from converters import escape_item, encoders, decoders
from err import raise_mysql_exception, Warning, Error, \
InterfaceError, DataError, DatabaseError, OperationalError, \
IntegrityError, InternalError, NotSupportedError, ProgrammingError
DEBUG = False
NULL_COLUMN = 251
UNSIGNED_CHAR_COLUMN = 251
UNSIGNED_SHORT_COLUMN = 252
UNSIGNED_INT24_COLUMN = 253
UNSIGNED_INT64_COLUMN = 254
UNSIGNED_CHAR_LENGTH = 1
UNSIGNED_SHORT_LENGTH = 2
UNSIGNED_INT24_LENGTH = 3
UNSIGNED_INT64_LENGTH = 8
DEFAULT_CHARSET = 'latin1'
def dump_packet(data):
def is_ascii(data):
if byte2int(data) >= 65 and byte2int(data) <= 122: #data.isalnum():
return data
return '.'
try:
print "packet length %d" % len(data)
print "method call[1]: %s" % sys._getframe(1).f_code.co_name
print "method call[2]: %s" % sys._getframe(2).f_code.co_name
print "method call[3]: %s" % sys._getframe(3).f_code.co_name
print "method call[4]: %s" % sys._getframe(4).f_code.co_name
print "method call[5]: %s" % sys._getframe(5).f_code.co_name
print "-" * 88
except ValueError: pass
dump_data = [data[i:i+16] for i in xrange(len(data)) if i%16 == 0]
for d in dump_data:
print ' '.join(map(lambda x:"%02X" % byte2int(x), d)) + \
' ' * (16 - len(d)) + ' ' * 2 + \
' '.join(map(lambda x:"%s" % is_ascii(x), d))
print "-" * 88
print ""
def _scramble(password, message):
    if password is None or len(password) == 0:
return int2byte(0)
if DEBUG: print 'password=' + password
stage1 = sha_new(password).digest()
stage2 = sha_new(stage1).digest()
s = sha_new()
s.update(message)
s.update(stage2)
result = s.digest()
return _my_crypt(result, stage1)
def _my_crypt(message1, message2):
length = len(message1)
result = struct.pack('B', length)
for i in xrange(length):
x = (struct.unpack('B', message1[i:i+1])[0] ^ \
struct.unpack('B', message2[i:i+1])[0])
result += struct.pack('B', x)
return result
# old_passwords support ported from libmysql/password.c
SCRAMBLE_LENGTH_323 = 8
class RandStruct_323(object):
def __init__(self, seed1, seed2):
self.max_value = 0x3FFFFFFFL
self.seed1 = seed1 % self.max_value
self.seed2 = seed2 % self.max_value
def my_rnd(self):
self.seed1 = (self.seed1 * 3L + self.seed2) % self.max_value
self.seed2 = (self.seed1 + self.seed2 + 33L) % self.max_value
return float(self.seed1) / float(self.max_value)
def _scramble_323(password, message):
hash_pass = _hash_password_323(password)
hash_message = _hash_password_323(message[:SCRAMBLE_LENGTH_323])
hash_pass_n = struct.unpack(">LL", hash_pass)
hash_message_n = struct.unpack(">LL", hash_message)
rand_st = RandStruct_323(hash_pass_n[0] ^ hash_message_n[0],
hash_pass_n[1] ^ hash_message_n[1])
outbuf = StringIO.StringIO()
for _ in xrange(min(SCRAMBLE_LENGTH_323, len(message))):
outbuf.write(int2byte(int(rand_st.my_rnd() * 31) + 64))
extra = int2byte(int(rand_st.my_rnd() * 31))
out = outbuf.getvalue()
outbuf = StringIO.StringIO()
for c in out:
outbuf.write(int2byte(byte2int(c) ^ byte2int(extra)))
return outbuf.getvalue()
def _hash_password_323(password):
nr = 1345345333L
add = 7L
nr2 = 0x12345671L
for c in [byte2int(x) for x in password if x not in (' ', '\t')]:
nr^= (((nr & 63)+add)*c)+ (nr << 8) & 0xFFFFFFFF
nr2= (nr2 + ((nr2 << 8) ^ nr)) & 0xFFFFFFFF
add= (add + c) & 0xFFFFFFFF
r1 = nr & ((1L << 31) - 1L) # kill sign bits
r2 = nr2 & ((1L << 31) - 1L)
# pack
return struct.pack(">LL", r1, r2)
def pack_int24(n):
return struct.pack('BBB', n&0xFF, (n>>8)&0xFF, (n>>16)&0xFF)
def unpack_uint16(n):
return struct.unpack('<H', n[0:2])[0]
# TODO: stop using bit-shifting in these functions...
# TODO: rename to "uint" to make it clear they're unsigned...
def unpack_int24(n):
try:
return struct.unpack('B',n[0])[0] + (struct.unpack('B', n[1])[0] << 8) +\
(struct.unpack('B',n[2])[0] << 16)
except TypeError:
return n[0] + (n[1] << 8) + (n[2] << 16)
def unpack_int32(n):
try:
return struct.unpack('B',n[0])[0] + (struct.unpack('B', n[1])[0] << 8) +\
(struct.unpack('B',n[2])[0] << 16) + (struct.unpack('B', n[3])[0] << 24)
except TypeError:
return n[0] + (n[1] << 8) + (n[2] << 16) + (n[3] << 24)
def unpack_int64(n):
try:
return struct.unpack('B',n[0])[0] + (struct.unpack('B', n[1])[0]<<8) +\
(struct.unpack('B',n[2])[0] << 16) + (struct.unpack('B',n[3])[0]<<24)+\
(struct.unpack('B',n[4])[0] << 32) + (struct.unpack('B',n[5])[0]<<40)+\
(struct.unpack('B',n[6])[0] << 48) + (struct.unpack('B',n[7])[0]<<56)
except TypeError:
return n[0] + (n[1] << 8) + (n[2] << 16) + (n[3] << 24) +\
(n[4] << 32) + (n[5] << 40) + (n[6] << 48) + (n[7] << 56)
def defaulterrorhandler(connection, cursor, errorclass, errorvalue):
err = errorclass, errorvalue
if DEBUG:
raise
if cursor:
cursor.messages.append(err)
else:
connection.messages.append(err)
del cursor
del connection
if not issubclass(errorclass, Error):
raise Error(errorclass, errorvalue)
else:
raise errorclass, errorvalue
class MysqlPacket(object):
"""Representation of a MySQL response packet. Reads in the packet
from the network socket, removes packet header and provides an interface
for reading/parsing the packet results."""
def __init__(self, connection):
self.connection = connection
self.__position = 0
self.__recv_packet()
def __recv_packet(self):
"""Parse the packet header and read entire packet payload into buffer."""
packet_header = self.connection.rfile.read(4)
if len(packet_header) < 4:
raise OperationalError(2013, "Lost connection to MySQL server during query")
if DEBUG: dump_packet(packet_header)
packet_length_bin = packet_header[:3]
self.__packet_number = byte2int(packet_header[3])
# TODO: check packet_num is correct (+1 from last packet)
bin_length = packet_length_bin + int2byte(0) # pad little-endian number
bytes_to_read = struct.unpack('<I', bin_length)[0]
recv_data = self.connection.rfile.read(bytes_to_read)
if len(recv_data) < bytes_to_read:
raise OperationalError(2013, "Lost connection to MySQL server during query")
if DEBUG: dump_packet(recv_data)
self.__data = recv_data
def packet_number(self): return self.__packet_number
def get_all_data(self): return self.__data
def read(self, size):
"""Read the first 'size' bytes in packet and advance cursor past them."""
result = self.peek(size)
self.advance(size)
return result
def read_all(self):
"""Read all remaining data in the packet.
(Subsequent read() or peek() will return errors.)
"""
result = self.__data[self.__position:]
self.__position = None # ensure no subsequent read() or peek()
return result
def advance(self, length):
"""Advance the cursor in data buffer 'length' bytes."""
new_position = self.__position + length
if new_position < 0 or new_position > len(self.__data):
raise Exception('Invalid advance amount (%s) for cursor. '
'Position=%s' % (length, new_position))
self.__position = new_position
def rewind(self, position=0):
"""Set the position of the data buffer cursor to 'position'."""
if position < 0 or position > len(self.__data):
raise Exception("Invalid position to rewind cursor to: %s." % position)
self.__position = position
def peek(self, size):
"""Look at the first 'size' bytes in packet without moving cursor."""
result = self.__data[self.__position:(self.__position+size)]
if len(result) != size:
error = ('Result length not requested length:\n'
'Expected=%s. Actual=%s. Position: %s. Data Length: %s'
% (size, len(result), self.__position, len(self.__data)))
if DEBUG:
print error
self.dump()
raise AssertionError(error)
return result
def get_bytes(self, position, length=1):
"""Get 'length' bytes starting at 'position'.
Position is start of payload (first four packet header bytes are not
included) starting at index '0'.
No error checking is done. If requesting outside end of buffer
an empty string (or string shorter than 'length') may be returned!
"""
return self.__data[position:(position+length)]
def read_length_coded_binary(self):
"""Read a 'Length Coded Binary' number from the data buffer.
Length coded numbers can be anywhere from 1 to 9 bytes depending
on the value of the first byte.
"""
c = byte2int(self.read(1))
if c == NULL_COLUMN:
return None
if c < UNSIGNED_CHAR_COLUMN:
return c
elif c == UNSIGNED_SHORT_COLUMN:
return unpack_uint16(self.read(UNSIGNED_SHORT_LENGTH))
elif c == UNSIGNED_INT24_COLUMN:
return unpack_int24(self.read(UNSIGNED_INT24_LENGTH))
elif c == UNSIGNED_INT64_COLUMN:
# TODO: what was 'longlong'? confirm it wasn't used?
return unpack_int64(self.read(UNSIGNED_INT64_LENGTH))
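        # Illustrative encodings of length coded binary values:
        #   '\x05'          -> 5     (first byte < 251 is the value itself)
        #   '\xfc\x00\x01'  -> 256   (252 prefix + 2-byte little-endian value)
        #   '\xfb'          -> None  (NULL column marker, 251)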
def read_length_coded_string(self):
"""Read a 'Length Coded String' from the data buffer.
A 'Length Coded String' consists first of a length coded
(unsigned, positive) integer represented in 1-9 bytes followed by
that many bytes of binary data. (For example "cat" would be "3cat".)
"""
length = self.read_length_coded_binary()
if length is None:
return None
return self.read(length)
def is_ok_packet(self):
return byte2int(self.get_bytes(0)) == 0
def is_eof_packet(self):
return byte2int(self.get_bytes(0)) == 254 # 'fe'
def is_resultset_packet(self):
field_count = byte2int(self.get_bytes(0))
return field_count >= 1 and field_count <= 250
def is_error_packet(self):
return byte2int(self.get_bytes(0)) == 255
def check_error(self):
if self.is_error_packet():
self.rewind()
self.advance(1) # field_count == error (we already know that)
errno = unpack_uint16(self.read(2))
if DEBUG: print "errno = %d" % errno
raise_mysql_exception(self.__data)
def dump(self):
dump_packet(self.__data)
class FieldDescriptorPacket(MysqlPacket):
"""A MysqlPacket that represents a specific column's metadata in the result.
Parsing is automatically done and the results are exported via public
attributes on the class such as: db, table_name, name, length, type_code.
"""
def __init__(self, *args):
MysqlPacket.__init__(self, *args)
self.__parse_field_descriptor()
def __parse_field_descriptor(self):
"""Parse the 'Field Descriptor' (Metadata) packet.
This is compatible with MySQL 4.1+ (not compatible with MySQL 4.0).
"""
self.catalog = self.read_length_coded_string()
self.db = self.read_length_coded_string()
self.table_name = self.read_length_coded_string()
self.org_table = self.read_length_coded_string()
self.name = self.read_length_coded_string().decode(self.connection.charset)
self.org_name = self.read_length_coded_string()
self.advance(1) # non-null filler
self.charsetnr = struct.unpack('<H', self.read(2))[0]
self.length = struct.unpack('<I', self.read(4))[0]
self.type_code = byte2int(self.read(1))
self.flags = struct.unpack('<H', self.read(2))[0]
self.scale = byte2int(self.read(1)) # "decimals"
self.advance(2) # filler (always 0x00)
# 'default' is a length coded binary and is still in the buffer?
# not used for normal result sets...
def description(self):
"""Provides a 7-item tuple compatible with the Python PEP249 DB Spec."""
desc = []
desc.append(self.name)
desc.append(self.type_code)
desc.append(None) # TODO: display_length; should this be self.length?
desc.append(self.get_column_length()) # 'internal_size'
desc.append(self.get_column_length()) # 'precision' # TODO: why!?!?
desc.append(self.scale)
# 'null_ok' -- can this be True/False rather than 1/0?
# if so just do: desc.append(bool(self.flags % 2 == 0))
if self.flags % 2 == 0:
desc.append(1)
else:
desc.append(0)
return tuple(desc)
def get_column_length(self):
if self.type_code == FIELD_TYPE.VAR_STRING:
mblen = MBLENGTH.get(self.charsetnr, 1)
return self.length // mblen
return self.length
def __str__(self):
return ('%s %s.%s.%s, type=%s'
% (self.__class__, self.db, self.table_name, self.name,
self.type_code))
class OKPacketWrapper(object):
"""
OK Packet Wrapper. It uses an existing packet object, and wraps
around it, exposing useful variables while still providing access
to the original packet objects variables and methods.
"""
def __init__(self, from_packet):
if not from_packet.is_ok_packet():
raise ValueError('Cannot create ' + str(self.__class__.__name__)
+ ' object from invalid packet type')
self.packet = from_packet
self.packet.advance(1)
self.affected_rows = self.packet.read_length_coded_binary()
self.insert_id = self.packet.read_length_coded_binary()
self.server_status = struct.unpack('<H', self.packet.read(2))[0]
self.warning_count = struct.unpack('<H', self.packet.read(2))[0]
self.message = self.packet.read_all()
def __getattr__(self, key):
if hasattr(self.packet, key):
return getattr(self.packet, key)
raise AttributeError(str(self.__class__)
+ " instance has no attribute '" + key + "'")
class EOFPacketWrapper(object):
"""
EOF Packet Wrapper. It uses an existing packet object, and wraps
around it, exposing useful variables while still providing access
to the original packet objects variables and methods.
"""
def __init__(self, from_packet):
if not from_packet.is_eof_packet():
raise ValueError('Cannot create ' + str(self.__class__.__name__)
+ ' object from invalid packet type')
self.packet = from_packet
        # unpack the 2-byte warning count, as OKPacketWrapper does
        self.warning_count = struct.unpack('<h', self.packet.read(2))[0]
server_status = struct.unpack('<h', self.packet.read(2))[0]
self.has_next = (server_status
& SERVER_STATUS.SERVER_MORE_RESULTS_EXISTS)
def __getattr__(self, key):
if hasattr(self.packet, key):
return getattr(self.packet, key)
raise AttributeError(str(self.__class__)
+ " instance has no attribute '" + key + "'")
class Connection(object):
"""
Representation of a socket with a mysql server.
The proper way to get an instance of this class is to call
connect()."""
errorhandler = defaulterrorhandler
def __init__(self, host="localhost", user=None, passwd="",
db=None, port=3306, unix_socket=None,
charset='', sql_mode=None,
read_default_file=None, conv=decoders, use_unicode=None,
client_flag=0, cursorclass=Cursor, init_command=None,
connect_timeout=None, ssl=None, read_default_group=None,
compress=None, named_pipe=None):
"""
Establish a connection to the MySQL database. Accepts several
arguments:
host: Host where the database server is located
user: Username to log in as
passwd: Password to use.
db: Database to use, None to not use a particular one.
port: MySQL port to use, default is usually OK.
unix_socket: Optionally, you can use a unix socket rather than TCP/IP.
charset: Charset you want to use.
sql_mode: Default SQL_MODE to use.
read_default_file: Specifies my.cnf file to read these parameters from under the [client] section.
conv: Decoders dictionary to use instead of the default one. This is used to provide custom marshalling of types. See converters.
use_unicode: Whether or not to default to unicode strings. This option defaults to true for Py3k.
client_flag: Custom flags to send to MySQL. Find potential values in constants.CLIENT.
cursorclass: Custom cursor class to use.
init_command: Initial SQL statement to run when connection is established.
connect_timeout: Timeout before throwing an exception when connecting.
ssl: A dict of arguments similar to mysql_ssl_set()'s parameters. For now the capath and cipher arguments are not supported.
read_default_group: Group to read from in the configuration file.
        compress: Not supported
        named_pipe: Not supported
"""
if use_unicode is None and sys.version_info[0] > 2:
use_unicode = True
if compress or named_pipe:
raise NotImplementedError, "compress and named_pipe arguments are not supported"
if ssl and (ssl.has_key('capath') or ssl.has_key('cipher')):
raise NotImplementedError, 'ssl options capath and cipher are not supported'
self.ssl = False
if ssl:
if not SSL_ENABLED:
raise NotImplementedError, "ssl module not found"
self.ssl = True
client_flag |= SSL
for k in ('key', 'cert', 'ca'):
v = None
if ssl.has_key(k):
v = ssl[k]
setattr(self, k, v)
if read_default_group and not read_default_file:
if sys.platform.startswith("win"):
read_default_file = "c:\\my.ini"
else:
read_default_file = "/etc/my.cnf"
if read_default_file:
if not read_default_group:
read_default_group = "client"
cfg = ConfigParser.RawConfigParser()
cfg.read(os.path.expanduser(read_default_file))
def _config(key, default):
try:
return cfg.get(read_default_group,key)
except:
return default
user = _config("user",user)
passwd = _config("password",passwd)
host = _config("host", host)
db = _config("db",db)
unix_socket = _config("socket",unix_socket)
port = int(_config("port", port))
charset = _config("default-character-set", charset)
self.host = host
self.port = port
self.user = user or DEFAULT_USER
self.password = passwd
self.db = db
self.unix_socket = unix_socket
if charset:
self.charset = charset
self.use_unicode = True
else:
self.charset = DEFAULT_CHARSET
self.use_unicode = False
if use_unicode is not None:
self.use_unicode = use_unicode
client_flag |= CAPABILITIES
client_flag |= MULTI_STATEMENTS
if self.db:
client_flag |= CONNECT_WITH_DB
self.client_flag = client_flag
self.cursorclass = cursorclass
self.connect_timeout = connect_timeout
self._connect()
self._result = None
self._affected_rows = 0
self.host_info = "Not connected"
self.messages = []
self.set_charset(charset)
self.encoders = encoders
self.decoders = conv
self.autocommit(False)
if sql_mode is not None:
c = self.cursor()
c.execute("SET sql_mode=%s", (sql_mode,))
self.commit()
if init_command is not None:
c = self.cursor()
c.execute(init_command)
self.commit()
def close(self):
''' Send the quit message and close the socket '''
if self.socket is None:
raise Error("Already closed")
send_data = struct.pack('<i',1) + int2byte(COM_QUIT)
self.wfile.write(send_data)
self.wfile.close()
self.rfile.close()
self.socket.close()
self.socket = None
self.rfile = None
self.wfile = None
def autocommit(self, value):
''' Set whether or not to commit after every execute() '''
try:
self._execute_command(COM_QUERY, "SET AUTOCOMMIT = %s" % \
self.escape(value))
self.read_packet()
except:
exc,value,tb = sys.exc_info()
self.errorhandler(None, exc, value)
def commit(self):
''' Commit changes to stable storage '''
try:
self._execute_command(COM_QUERY, "COMMIT")
self.read_packet()
except:
exc,value,tb = sys.exc_info()
self.errorhandler(None, exc, value)
def rollback(self):
''' Roll back the current transaction '''
try:
self._execute_command(COM_QUERY, "ROLLBACK")
self.read_packet()
except:
exc,value,tb = sys.exc_info()
self.errorhandler(None, exc, value)
def escape(self, obj):
''' Escape whatever value you pass to it '''
return escape_item(obj, self.charset)
def literal(self, obj):
''' Alias for escape() '''
return escape_item(obj, self.charset)
def cursor(self, cursor=None):
''' Create a new cursor to execute queries with '''
if cursor:
return cursor(self)
return self.cursorclass(self)
def __enter__(self):
''' Context manager that returns a Cursor '''
return self.cursor()
def __exit__(self, exc, value, traceback):
''' On successful exit, commit. On exception, rollback. '''
if exc:
self.rollback()
else:
self.commit()
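    # A minimal sketch of the context-manager behavior above -- `conn`
    # and table `t` are hypothetical:
    #
    #   with conn as cur:
    #       cur.execute("INSERT INTO t VALUES (1)")
    #   # committed on clean exit; an exception triggers rollback()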
# The following methods are INTERNAL USE ONLY (called from Cursor)
def query(self, sql, unbuffered=False):
if DEBUG:
print "sending query: %s" % sql
self._execute_command(COM_QUERY, sql)
self._affected_rows = self._read_query_result(unbuffered=unbuffered)
return self._affected_rows
def next_result(self):
self._affected_rows = self._read_query_result()
return self._affected_rows
def affected_rows(self):
return self._affected_rows
def kill(self, thread_id):
arg = struct.pack('<I', thread_id)
try:
self._execute_command(COM_PROCESS_KILL, arg)
except:
exc,value,tb = sys.exc_info()
self.errorhandler(None, exc, value)
return
pkt = self.read_packet()
return pkt.is_ok_packet()
def ping(self, reconnect=True):
''' Check if the server is alive '''
try:
self._execute_command(COM_PING, "")
pkt = self.read_packet()
return pkt.is_ok_packet()
except:
if reconnect:
self._connect()
return self.ping(False)
else:
exc,value,tb = sys.exc_info()
self.errorhandler(None, exc, value)
return
def set_charset(self, charset):
try:
if charset:
self._execute_command(COM_QUERY, "SET NAMES %s" %
self.escape(charset))
self.read_packet()
self.charset = charset
except:
exc,value,tb = sys.exc_info()
self.errorhandler(None, exc, value)
def _connect(self):
try:
if self.unix_socket and (self.host == 'localhost' or self.host == '127.0.0.1'):
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
t = sock.gettimeout()
sock.settimeout(self.connect_timeout)
sock.connect(self.unix_socket)
sock.settimeout(t)
self.host_info = "Localhost via UNIX socket"
if DEBUG: print 'connected using unix_socket'
else:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
t = sock.gettimeout()
sock.settimeout(self.connect_timeout)
sock.connect((self.host, self.port))
sock.settimeout(t)
self.host_info = "socket %s:%d" % (self.host, self.port)
if DEBUG: print 'connected using socket'
self.socket = sock
self.rfile = self.socket.makefile("rb")
self.wfile = self.socket.makefile("wb")
self._get_server_information()
self._request_authentication()
except socket.error, e:
raise OperationalError(2003, "Can't connect to MySQL server on %r (%s)" % (self.host, e.args[0]))
def read_packet(self, packet_type=MysqlPacket):
"""Read an entire "mysql packet" in its entirety from the network
and return a MysqlPacket type that represents the results."""
packet = packet_type(self)
packet.check_error()
return packet
    def _read_query_result(self, unbuffered=False):
        if unbuffered:
            # create the result before the try block so the except
            # clause can't hit an unbound name if construction fails
            result = MySQLResult(self)
            try:
                result.init_unbuffered_query()
            except:
                result.unbuffered_active = False
                raise
else:
result = MySQLResult(self)
result.read()
self._result = result
return result.affected_rows
def insert_id(self):
if self._result:
return self._result.insert_id
else:
return 0
def _send_command(self, command, sql):
#send_data = struct.pack('<i', len(sql) + 1) + command + sql
# could probably be more efficient, at least it's correct
if not self.socket:
self.errorhandler(None, InterfaceError, "(0, '')")
# If the last query was unbuffered, make sure it finishes before
# sending new commands
if self._result is not None and self._result.unbuffered_active:
self._result._finish_unbuffered_query()
if isinstance(sql, unicode):
sql = sql.encode(self.charset)
prelude = struct.pack('<i', len(sql)+1) + int2byte(command)
self.wfile.write(prelude + sql)
self.wfile.flush()
if DEBUG: dump_packet(prelude + sql)
def _execute_command(self, command, sql):
self._send_command(command, sql)
def _request_authentication(self):
self._send_authentication()
def _send_authentication(self):
self.client_flag |= CAPABILITIES
if self.server_version.startswith('5'):
self.client_flag |= MULTI_RESULTS
if self.user is None:
raise ValueError, "Did not specify a username"
charset_id = charset_by_name(self.charset).id
self.user = self.user.encode(self.charset)
data_init = struct.pack('<i', self.client_flag) + struct.pack("<I", 1) + \
int2byte(charset_id) + int2byte(0)*23
next_packet = 1
if self.ssl:
data = pack_int24(len(data_init)) + int2byte(next_packet) + data_init
next_packet += 1
if DEBUG: dump_packet(data)
self.wfile.write(data)
self.wfile.flush()
            self.socket = ssl.wrap_socket(self.socket, keyfile=self.key,
                                          certfile=self.cert,
                                          ssl_version=ssl.PROTOCOL_TLSv1,
                                          cert_reqs=ssl.CERT_REQUIRED,
                                          ca_certs=self.ca)
self.rfile = self.socket.makefile("rb")
self.wfile = self.socket.makefile("wb")
data = data_init + self.user+int2byte(0) + _scramble(self.password.encode(self.charset), self.salt)
if self.db:
self.db = self.db.encode(self.charset)
data += self.db + int2byte(0)
data = pack_int24(len(data)) + int2byte(next_packet) + data
next_packet += 2
if DEBUG: dump_packet(data)
self.wfile.write(data)
self.wfile.flush()
auth_packet = MysqlPacket(self)
auth_packet.check_error()
if DEBUG: auth_packet.dump()
# if old_passwords is enabled the packet will be 1 byte long and
# have the octet 254
if auth_packet.is_eof_packet():
# send legacy handshake
#raise NotImplementedError, "old_passwords are not supported. Check to see if mysqld was started with --old-passwords, if old-passwords=1 in a my.cnf file, or if there are some short hashes in your mysql.user table."
# TODO: is this the correct charset?
data = _scramble_323(self.password.encode(self.charset), self.salt.encode(self.charset)) + int2byte(0)
data = pack_int24(len(data)) + int2byte(next_packet) + data
self.wfile.write(data)
self.wfile.flush()
auth_packet = MysqlPacket(self)
auth_packet.check_error()
if DEBUG: auth_packet.dump()
# _mysql support
def thread_id(self):
return self.server_thread_id[0]
def character_set_name(self):
return self.charset
def get_host_info(self):
return self.host_info
def get_proto_info(self):
return self.protocol_version
def _get_server_information(self):
i = 0
packet = MysqlPacket(self)
data = packet.get_all_data()
if DEBUG: dump_packet(data)
#packet_len = byte2int(data[i:i+1])
#i += 4
self.protocol_version = byte2int(data[i:i+1])
i += 1
server_end = data.find(int2byte(0), i)
# TODO: is this the correct charset? should it be default_charset?
self.server_version = data[i:server_end].decode(self.charset)
i = server_end + 1
        # thread id is a 4-byte little-endian value
        self.server_thread_id = struct.unpack('<I', data[i:i+4])
        i += 4
self.salt = data[i:i+8]
i += 9
if len(data) >= i + 1:
i += 1
self.server_capabilities = struct.unpack('<h', data[i:i+2])[0]
i += 1
self.server_language = byte2int(data[i:i+1])
self.server_charset = charset_by_id(self.server_language).name
i += 16
if len(data) >= i+12-1:
rest_salt = data[i:i+12]
self.salt += rest_salt
def get_server_info(self):
return self.server_version
Warning = Warning
Error = Error
InterfaceError = InterfaceError
DatabaseError = DatabaseError
DataError = DataError
OperationalError = OperationalError
IntegrityError = IntegrityError
InternalError = InternalError
ProgrammingError = ProgrammingError
NotSupportedError = NotSupportedError
# TODO: move OK and EOF packet parsing/logic into a proper subclass
# of MysqlPacket like has been done with FieldDescriptorPacket.
class MySQLResult(object):
def __init__(self, connection):
from weakref import proxy
self.connection = proxy(connection)
self.affected_rows = None
self.insert_id = None
self.server_status = 0
self.warning_count = 0
self.message = None
self.field_count = 0
self.description = None
self.rows = None
self.has_next = None
self.unbuffered_active = False
def __del__(self):
if self.unbuffered_active:
self._finish_unbuffered_query()
def read(self):
self.first_packet = self.connection.read_packet()
# TODO: use classes for different packet types?
if self.first_packet.is_ok_packet():
self._read_ok_packet()
else:
self._read_result_packet()
def init_unbuffered_query(self):
self.unbuffered_active = True
self.first_packet = self.connection.read_packet()
if self.first_packet.is_ok_packet():
self._read_ok_packet()
self.unbuffered_active = False
else:
self.field_count = byte2int(self.first_packet.read(1))
self._get_descriptions()
# Apparently, MySQLdb picks this number because it's the maximum
# value of a 64bit unsigned integer. Since we're emulating MySQLdb,
# we set it to this instead of None, which would be preferred.
self.affected_rows = 18446744073709551615
def _read_ok_packet(self):
ok_packet = OKPacketWrapper(self.first_packet)
self.affected_rows = ok_packet.affected_rows
self.insert_id = ok_packet.insert_id
self.server_status = ok_packet.server_status
self.warning_count = ok_packet.warning_count
self.message = ok_packet.message
def _check_packet_is_eof(self, packet):
if packet.is_eof_packet():
eof_packet = EOFPacketWrapper(packet)
self.warning_count = eof_packet.warning_count
self.has_next = eof_packet.has_next
return True
return False
def _read_result_packet(self):
self.field_count = byte2int(self.first_packet.read(1))
self._get_descriptions()
self._read_rowdata_packet()
def _read_rowdata_packet_unbuffered(self):
# Check if in an active query
if not self.unbuffered_active: return
# EOF
packet = self.connection.read_packet()
if self._check_packet_is_eof(packet):
self.unbuffered_active = False
self.rows = None
return
row = []
for field in self.fields:
data = packet.read_length_coded_string()
converted = None
if field.type_code in self.connection.decoders:
converter = self.connection.decoders[field.type_code]
if DEBUG: print "DEBUG: field=%s, converter=%s" % (field, converter)
if data != None:
converted = converter(self.connection, field, data)
row.append(converted)
self.affected_rows = 1
self.rows = (tuple(row),)
if DEBUG: print self.rows
def _finish_unbuffered_query(self):
# After much reading on the MySQL protocol, it appears that there is,
# in fact, no way to stop MySQL from sending all the data after
# executing a query, so we just spin, and wait for an EOF packet.
while self.unbuffered_active:
packet = self.connection.read_packet()
if self._check_packet_is_eof(packet):
self.unbuffered_active = False
# TODO: implement this as an iterable so that it is more
# memory-efficient and lower-latency for the client...
def _read_rowdata_packet(self):
"""Read a rowdata packet for each data row in the result set."""
rows = []
while True:
packet = self.connection.read_packet()
if self._check_packet_is_eof(packet):
break
row = []
for field in self.fields:
data = packet.read_length_coded_string()
converted = None
if field.type_code in self.connection.decoders:
converter = self.connection.decoders[field.type_code]
if DEBUG: print "DEBUG: field=%s, converter=%s" % (field, converter)
if data != None:
converted = converter(self.connection, field, data)
row.append(converted)
rows.append(tuple(row))
self.affected_rows = len(rows)
self.rows = tuple(rows)
if DEBUG: print self.rows
def _get_descriptions(self):
"""Read a column descriptor packet for each column in the result."""
self.fields = []
description = []
for i in xrange(self.field_count):
field = self.connection.read_packet(FieldDescriptorPacket)
self.fields.append(field)
description.append(field.description())
eof_packet = self.connection.read_packet()
assert eof_packet.is_eof_packet(), 'Protocol error, expecting EOF'
self.description = tuple(description)
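# Hedged usage sketch (an editorial addition, not part of the original
# driver): how Connection and MySQLResult fit together for a buffered
# query, mirroring the query/read sequence used above. COM_QUERY (0x03)
# and a connected Connection instance are assumptions here.
def _example_buffered_query(connection, sql):
    connection._execute_command(0x03, sql)  # 0x03 == COM_QUERY
    result = MySQLResult(connection)
    result.read()  # parses either an OK packet or a result set,
                   # filling .description and .rows
    return result.rows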
| Python |
COM_SLEEP = 0x00
COM_QUIT = 0x01
COM_INIT_DB = 0x02
COM_QUERY = 0x03
COM_FIELD_LIST = 0x04
COM_CREATE_DB = 0x05
COM_DROP_DB = 0x06
COM_REFRESH = 0x07
COM_SHUTDOWN = 0x08
COM_STATISTICS = 0x09
COM_PROCESS_INFO = 0x0a
COM_CONNECT = 0x0b
COM_PROCESS_KILL = 0x0c
COM_DEBUG = 0x0d
COM_PING = 0x0e
COM_TIME = 0x0f
COM_DELAYED_INSERT = 0x10
COM_CHANGE_USER = 0x11
COM_BINLOG_DUMP = 0x12
COM_TABLE_DUMP = 0x13
COM_CONNECT_OUT = 0x14
COM_REGISTER_SLAVE = 0x15
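# Illustrative sketch (an addition, not upstream code): how one of these
# command bytes is combined with its SQL payload into the prelude that
# Connection._send_command() writes; chr() here stands in for the driver's
# int2byte() helper.
import struct
def _example_command_prelude(sql):
    # 4-byte little-endian length (payload plus command byte), then the
    # command byte itself, exactly as packed in _send_command()
    return struct.pack('<i', len(sql) + 1) + chr(COM_QUERY)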
| Python |
DECIMAL = 0
TINY = 1
SHORT = 2
LONG = 3
FLOAT = 4
DOUBLE = 5
NULL = 6
TIMESTAMP = 7
LONGLONG = 8
INT24 = 9
DATE = 10
TIME = 11
DATETIME = 12
YEAR = 13
NEWDATE = 14
VARCHAR = 15
BIT = 16
NEWDECIMAL = 246
ENUM = 247
SET = 248
TINY_BLOB = 249
MEDIUM_BLOB = 250
LONG_BLOB = 251
BLOB = 252
VAR_STRING = 253
STRING = 254
GEOMETRY = 255
CHAR = TINY
INTERVAL = ENUM
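# Hedged example (an addition): a minimal decoder table keyed by these
# type codes, in the style of the connection.decoders mapping consulted by
# MySQLResult._read_rowdata_packet(). The plain converters below are
# assumptions; the driver's real converters take (connection, field, data).
_example_decoders = {
    TINY: int,
    SHORT: int,
    LONG: int,
    LONGLONG: long,
    FLOAT: float,
    DOUBLE: float,
    VAR_STRING: str,
}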
| Python |
NOT_NULL = 1
PRI_KEY = 2
UNIQUE_KEY = 4
MULTIPLE_KEY = 8
BLOB = 16
UNSIGNED = 32
ZEROFILL = 64
BINARY = 128
ENUM = 256
AUTO_INCREMENT = 512
TIMESTAMP = 1024
SET = 2048
PART_KEY = 16384
GROUP = 32767
UNIQUE = 65536
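# Illustrative helper (an addition, not upstream code): testing a column's
# flags value, e.g. the flags parsed from a FieldDescriptorPacket, against
# these bit masks.
def _example_is_auto_increment_pk(flags):
    return bool(flags & PRI_KEY) and bool(flags & AUTO_INCREMENT)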
| Python |
ERROR_FIRST = 1000
HASHCHK = 1000
NISAMCHK = 1001
NO = 1002
YES = 1003
CANT_CREATE_FILE = 1004
CANT_CREATE_TABLE = 1005
CANT_CREATE_DB = 1006
DB_CREATE_EXISTS = 1007
DB_DROP_EXISTS = 1008
DB_DROP_DELETE = 1009
DB_DROP_RMDIR = 1010
CANT_DELETE_FILE = 1011
CANT_FIND_SYSTEM_REC = 1012
CANT_GET_STAT = 1013
CANT_GET_WD = 1014
CANT_LOCK = 1015
CANT_OPEN_FILE = 1016
FILE_NOT_FOUND = 1017
CANT_READ_DIR = 1018
CANT_SET_WD = 1019
CHECKREAD = 1020
DISK_FULL = 1021
DUP_KEY = 1022
ERROR_ON_CLOSE = 1023
ERROR_ON_READ = 1024
ERROR_ON_RENAME = 1025
ERROR_ON_WRITE = 1026
FILE_USED = 1027
FILSORT_ABORT = 1028
FORM_NOT_FOUND = 1029
GET_ERRNO = 1030
ILLEGAL_HA = 1031
KEY_NOT_FOUND = 1032
NOT_FORM_FILE = 1033
NOT_KEYFILE = 1034
OLD_KEYFILE = 1035
OPEN_AS_READONLY = 1036
OUTOFMEMORY = 1037
OUT_OF_SORTMEMORY = 1038
UNEXPECTED_EOF = 1039
CON_COUNT_ERROR = 1040
OUT_OF_RESOURCES = 1041
BAD_HOST_ERROR = 1042
HANDSHAKE_ERROR = 1043
DBACCESS_DENIED_ERROR = 1044
ACCESS_DENIED_ERROR = 1045
NO_DB_ERROR = 1046
UNKNOWN_COM_ERROR = 1047
BAD_NULL_ERROR = 1048
BAD_DB_ERROR = 1049
TABLE_EXISTS_ERROR = 1050
BAD_TABLE_ERROR = 1051
NON_UNIQ_ERROR = 1052
SERVER_SHUTDOWN = 1053
BAD_FIELD_ERROR = 1054
WRONG_FIELD_WITH_GROUP = 1055
WRONG_GROUP_FIELD = 1056
WRONG_SUM_SELECT = 1057
WRONG_VALUE_COUNT = 1058
TOO_LONG_IDENT = 1059
DUP_FIELDNAME = 1060
DUP_KEYNAME = 1061
DUP_ENTRY = 1062
WRONG_FIELD_SPEC = 1063
PARSE_ERROR = 1064
EMPTY_QUERY = 1065
NONUNIQ_TABLE = 1066
INVALID_DEFAULT = 1067
MULTIPLE_PRI_KEY = 1068
TOO_MANY_KEYS = 1069
TOO_MANY_KEY_PARTS = 1070
TOO_LONG_KEY = 1071
KEY_COLUMN_DOES_NOT_EXITS = 1072
BLOB_USED_AS_KEY = 1073
TOO_BIG_FIELDLENGTH = 1074
WRONG_AUTO_KEY = 1075
READY = 1076
NORMAL_SHUTDOWN = 1077
GOT_SIGNAL = 1078
SHUTDOWN_COMPLETE = 1079
FORCING_CLOSE = 1080
IPSOCK_ERROR = 1081
NO_SUCH_INDEX = 1082
WRONG_FIELD_TERMINATORS = 1083
BLOBS_AND_NO_TERMINATED = 1084
TEXTFILE_NOT_READABLE = 1085
FILE_EXISTS_ERROR = 1086
LOAD_INFO = 1087
ALTER_INFO = 1088
WRONG_SUB_KEY = 1089
CANT_REMOVE_ALL_FIELDS = 1090
CANT_DROP_FIELD_OR_KEY = 1091
INSERT_INFO = 1092
UPDATE_TABLE_USED = 1093
NO_SUCH_THREAD = 1094
KILL_DENIED_ERROR = 1095
NO_TABLES_USED = 1096
TOO_BIG_SET = 1097
NO_UNIQUE_LOGFILE = 1098
TABLE_NOT_LOCKED_FOR_WRITE = 1099
TABLE_NOT_LOCKED = 1100
BLOB_CANT_HAVE_DEFAULT = 1101
WRONG_DB_NAME = 1102
WRONG_TABLE_NAME = 1103
TOO_BIG_SELECT = 1104
UNKNOWN_ERROR = 1105
UNKNOWN_PROCEDURE = 1106
WRONG_PARAMCOUNT_TO_PROCEDURE = 1107
WRONG_PARAMETERS_TO_PROCEDURE = 1108
UNKNOWN_TABLE = 1109
FIELD_SPECIFIED_TWICE = 1110
INVALID_GROUP_FUNC_USE = 1111
UNSUPPORTED_EXTENSION = 1112
TABLE_MUST_HAVE_COLUMNS = 1113
RECORD_FILE_FULL = 1114
UNKNOWN_CHARACTER_SET = 1115
TOO_MANY_TABLES = 1116
TOO_MANY_FIELDS = 1117
TOO_BIG_ROWSIZE = 1118
STACK_OVERRUN = 1119
WRONG_OUTER_JOIN = 1120
NULL_COLUMN_IN_INDEX = 1121
CANT_FIND_UDF = 1122
CANT_INITIALIZE_UDF = 1123
UDF_NO_PATHS = 1124
UDF_EXISTS = 1125
CANT_OPEN_LIBRARY = 1126
CANT_FIND_DL_ENTRY = 1127
FUNCTION_NOT_DEFINED = 1128
HOST_IS_BLOCKED = 1129
HOST_NOT_PRIVILEGED = 1130
PASSWORD_ANONYMOUS_USER = 1131
PASSWORD_NOT_ALLOWED = 1132
PASSWORD_NO_MATCH = 1133
UPDATE_INFO = 1134
CANT_CREATE_THREAD = 1135
WRONG_VALUE_COUNT_ON_ROW = 1136
CANT_REOPEN_TABLE = 1137
INVALID_USE_OF_NULL = 1138
REGEXP_ERROR = 1139
MIX_OF_GROUP_FUNC_AND_FIELDS = 1140
NONEXISTING_GRANT = 1141
TABLEACCESS_DENIED_ERROR = 1142
COLUMNACCESS_DENIED_ERROR = 1143
ILLEGAL_GRANT_FOR_TABLE = 1144
GRANT_WRONG_HOST_OR_USER = 1145
NO_SUCH_TABLE = 1146
NONEXISTING_TABLE_GRANT = 1147
NOT_ALLOWED_COMMAND = 1148
SYNTAX_ERROR = 1149
DELAYED_CANT_CHANGE_LOCK = 1150
TOO_MANY_DELAYED_THREADS = 1151
ABORTING_CONNECTION = 1152
NET_PACKET_TOO_LARGE = 1153
NET_READ_ERROR_FROM_PIPE = 1154
NET_FCNTL_ERROR = 1155
NET_PACKETS_OUT_OF_ORDER = 1156
NET_UNCOMPRESS_ERROR = 1157
NET_READ_ERROR = 1158
NET_READ_INTERRUPTED = 1159
NET_ERROR_ON_WRITE = 1160
NET_WRITE_INTERRUPTED = 1161
TOO_LONG_STRING = 1162
TABLE_CANT_HANDLE_BLOB = 1163
TABLE_CANT_HANDLE_AUTO_INCREMENT = 1164
DELAYED_INSERT_TABLE_LOCKED = 1165
WRONG_COLUMN_NAME = 1166
WRONG_KEY_COLUMN = 1167
WRONG_MRG_TABLE = 1168
DUP_UNIQUE = 1169
BLOB_KEY_WITHOUT_LENGTH = 1170
PRIMARY_CANT_HAVE_NULL = 1171
TOO_MANY_ROWS = 1172
REQUIRES_PRIMARY_KEY = 1173
NO_RAID_COMPILED = 1174
UPDATE_WITHOUT_KEY_IN_SAFE_MODE = 1175
KEY_DOES_NOT_EXITS = 1176
CHECK_NO_SUCH_TABLE = 1177
CHECK_NOT_IMPLEMENTED = 1178
CANT_DO_THIS_DURING_AN_TRANSACTION = 1179
ERROR_DURING_COMMIT = 1180
ERROR_DURING_ROLLBACK = 1181
ERROR_DURING_FLUSH_LOGS = 1182
ERROR_DURING_CHECKPOINT = 1183
NEW_ABORTING_CONNECTION = 1184
DUMP_NOT_IMPLEMENTED = 1185
FLUSH_MASTER_BINLOG_CLOSED = 1186
INDEX_REBUILD = 1187
MASTER = 1188
MASTER_NET_READ = 1189
MASTER_NET_WRITE = 1190
FT_MATCHING_KEY_NOT_FOUND = 1191
LOCK_OR_ACTIVE_TRANSACTION = 1192
UNKNOWN_SYSTEM_VARIABLE = 1193
CRASHED_ON_USAGE = 1194
CRASHED_ON_REPAIR = 1195
WARNING_NOT_COMPLETE_ROLLBACK = 1196
TRANS_CACHE_FULL = 1197
SLAVE_MUST_STOP = 1198
SLAVE_NOT_RUNNING = 1199
BAD_SLAVE = 1200
MASTER_INFO = 1201
SLAVE_THREAD = 1202
TOO_MANY_USER_CONNECTIONS = 1203
SET_CONSTANTS_ONLY = 1204
LOCK_WAIT_TIMEOUT = 1205
LOCK_TABLE_FULL = 1206
READ_ONLY_TRANSACTION = 1207
DROP_DB_WITH_READ_LOCK = 1208
CREATE_DB_WITH_READ_LOCK = 1209
WRONG_ARGUMENTS = 1210
NO_PERMISSION_TO_CREATE_USER = 1211
UNION_TABLES_IN_DIFFERENT_DIR = 1212
LOCK_DEADLOCK = 1213
TABLE_CANT_HANDLE_FT = 1214
CANNOT_ADD_FOREIGN = 1215
NO_REFERENCED_ROW = 1216
ROW_IS_REFERENCED = 1217
CONNECT_TO_MASTER = 1218
QUERY_ON_MASTER = 1219
ERROR_WHEN_EXECUTING_COMMAND = 1220
WRONG_USAGE = 1221
WRONG_NUMBER_OF_COLUMNS_IN_SELECT = 1222
CANT_UPDATE_WITH_READLOCK = 1223
MIXING_NOT_ALLOWED = 1224
DUP_ARGUMENT = 1225
USER_LIMIT_REACHED = 1226
SPECIFIC_ACCESS_DENIED_ERROR = 1227
LOCAL_VARIABLE = 1228
GLOBAL_VARIABLE = 1229
NO_DEFAULT = 1230
WRONG_VALUE_FOR_VAR = 1231
WRONG_TYPE_FOR_VAR = 1232
VAR_CANT_BE_READ = 1233
CANT_USE_OPTION_HERE = 1234
NOT_SUPPORTED_YET = 1235
MASTER_FATAL_ERROR_READING_BINLOG = 1236
SLAVE_IGNORED_TABLE = 1237
INCORRECT_GLOBAL_LOCAL_VAR = 1238
WRONG_FK_DEF = 1239
KEY_REF_DO_NOT_MATCH_TABLE_REF = 1240
OPERAND_COLUMNS = 1241
SUBQUERY_NO_1_ROW = 1242
UNKNOWN_STMT_HANDLER = 1243
CORRUPT_HELP_DB = 1244
CYCLIC_REFERENCE = 1245
AUTO_CONVERT = 1246
ILLEGAL_REFERENCE = 1247
DERIVED_MUST_HAVE_ALIAS = 1248
SELECT_REDUCED = 1249
TABLENAME_NOT_ALLOWED_HERE = 1250
NOT_SUPPORTED_AUTH_MODE = 1251
SPATIAL_CANT_HAVE_NULL = 1252
COLLATION_CHARSET_MISMATCH = 1253
SLAVE_WAS_RUNNING = 1254
SLAVE_WAS_NOT_RUNNING = 1255
TOO_BIG_FOR_UNCOMPRESS = 1256
ZLIB_Z_MEM_ERROR = 1257
ZLIB_Z_BUF_ERROR = 1258
ZLIB_Z_DATA_ERROR = 1259
CUT_VALUE_GROUP_CONCAT = 1260
WARN_TOO_FEW_RECORDS = 1261
WARN_TOO_MANY_RECORDS = 1262
WARN_NULL_TO_NOTNULL = 1263
WARN_DATA_OUT_OF_RANGE = 1264
WARN_DATA_TRUNCATED = 1265
WARN_USING_OTHER_HANDLER = 1266
CANT_AGGREGATE_2COLLATIONS = 1267
DROP_USER = 1268
REVOKE_GRANTS = 1269
CANT_AGGREGATE_3COLLATIONS = 1270
CANT_AGGREGATE_NCOLLATIONS = 1271
VARIABLE_IS_NOT_STRUCT = 1272
UNKNOWN_COLLATION = 1273
SLAVE_IGNORED_SSL_PARAMS = 1274
SERVER_IS_IN_SECURE_AUTH_MODE = 1275
WARN_FIELD_RESOLVED = 1276
BAD_SLAVE_UNTIL_COND = 1277
MISSING_SKIP_SLAVE = 1278
UNTIL_COND_IGNORED = 1279
WRONG_NAME_FOR_INDEX = 1280
WRONG_NAME_FOR_CATALOG = 1281
WARN_QC_RESIZE = 1282
BAD_FT_COLUMN = 1283
UNKNOWN_KEY_CACHE = 1284
WARN_HOSTNAME_WONT_WORK = 1285
UNKNOWN_STORAGE_ENGINE = 1286
WARN_DEPRECATED_SYNTAX = 1287
NON_UPDATABLE_TABLE = 1288
FEATURE_DISABLED = 1289
OPTION_PREVENTS_STATEMENT = 1290
DUPLICATED_VALUE_IN_TYPE = 1291
TRUNCATED_WRONG_VALUE = 1292
TOO_MUCH_AUTO_TIMESTAMP_COLS = 1293
INVALID_ON_UPDATE = 1294
UNSUPPORTED_PS = 1295
GET_ERRMSG = 1296
GET_TEMPORARY_ERRMSG = 1297
UNKNOWN_TIME_ZONE = 1298
WARN_INVALID_TIMESTAMP = 1299
INVALID_CHARACTER_STRING = 1300
WARN_ALLOWED_PACKET_OVERFLOWED = 1301
CONFLICTING_DECLARATIONS = 1302
SP_NO_RECURSIVE_CREATE = 1303
SP_ALREADY_EXISTS = 1304
SP_DOES_NOT_EXIST = 1305
SP_DROP_FAILED = 1306
SP_STORE_FAILED = 1307
SP_LILABEL_MISMATCH = 1308
SP_LABEL_REDEFINE = 1309
SP_LABEL_MISMATCH = 1310
SP_UNINIT_VAR = 1311
SP_BADSELECT = 1312
SP_BADRETURN = 1313
SP_BADSTATEMENT = 1314
UPDATE_LOG_DEPRECATED_IGNORED = 1315
UPDATE_LOG_DEPRECATED_TRANSLATED = 1316
QUERY_INTERRUPTED = 1317
SP_WRONG_NO_OF_ARGS = 1318
SP_COND_MISMATCH = 1319
SP_NORETURN = 1320
SP_NORETURNEND = 1321
SP_BAD_CURSOR_QUERY = 1322
SP_BAD_CURSOR_SELECT = 1323
SP_CURSOR_MISMATCH = 1324
SP_CURSOR_ALREADY_OPEN = 1325
SP_CURSOR_NOT_OPEN = 1326
SP_UNDECLARED_VAR = 1327
SP_WRONG_NO_OF_FETCH_ARGS = 1328
SP_FETCH_NO_DATA = 1329
SP_DUP_PARAM = 1330
SP_DUP_VAR = 1331
SP_DUP_COND = 1332
SP_DUP_CURS = 1333
SP_CANT_ALTER = 1334
SP_SUBSELECT_NYI = 1335
STMT_NOT_ALLOWED_IN_SF_OR_TRG = 1336
SP_VARCOND_AFTER_CURSHNDLR = 1337
SP_CURSOR_AFTER_HANDLER = 1338
SP_CASE_NOT_FOUND = 1339
FPARSER_TOO_BIG_FILE = 1340
FPARSER_BAD_HEADER = 1341
FPARSER_EOF_IN_COMMENT = 1342
FPARSER_ERROR_IN_PARAMETER = 1343
FPARSER_EOF_IN_UNKNOWN_PARAMETER = 1344
VIEW_NO_EXPLAIN = 1345
FRM_UNKNOWN_TYPE = 1346
WRONG_OBJECT = 1347
NONUPDATEABLE_COLUMN = 1348
VIEW_SELECT_DERIVED = 1349
VIEW_SELECT_CLAUSE = 1350
VIEW_SELECT_VARIABLE = 1351
VIEW_SELECT_TMPTABLE = 1352
VIEW_WRONG_LIST = 1353
WARN_VIEW_MERGE = 1354
WARN_VIEW_WITHOUT_KEY = 1355
VIEW_INVALID = 1356
SP_NO_DROP_SP = 1357
SP_GOTO_IN_HNDLR = 1358
TRG_ALREADY_EXISTS = 1359
TRG_DOES_NOT_EXIST = 1360
TRG_ON_VIEW_OR_TEMP_TABLE = 1361
TRG_CANT_CHANGE_ROW = 1362
TRG_NO_SUCH_ROW_IN_TRG = 1363
NO_DEFAULT_FOR_FIELD = 1364
DIVISION_BY_ZERO = 1365
TRUNCATED_WRONG_VALUE_FOR_FIELD = 1366
ILLEGAL_VALUE_FOR_TYPE = 1367
VIEW_NONUPD_CHECK = 1368
VIEW_CHECK_FAILED = 1369
PROCACCESS_DENIED_ERROR = 1370
RELAY_LOG_FAIL = 1371
PASSWD_LENGTH = 1372
UNKNOWN_TARGET_BINLOG = 1373
IO_ERR_LOG_INDEX_READ = 1374
BINLOG_PURGE_PROHIBITED = 1375
FSEEK_FAIL = 1376
BINLOG_PURGE_FATAL_ERR = 1377
LOG_IN_USE = 1378
LOG_PURGE_UNKNOWN_ERR = 1379
RELAY_LOG_INIT = 1380
NO_BINARY_LOGGING = 1381
RESERVED_SYNTAX = 1382
WSAS_FAILED = 1383
DIFF_GROUPS_PROC = 1384
NO_GROUP_FOR_PROC = 1385
ORDER_WITH_PROC = 1386
LOGGING_PROHIBIT_CHANGING_OF = 1387
NO_FILE_MAPPING = 1388
WRONG_MAGIC = 1389
PS_MANY_PARAM = 1390
KEY_PART_0 = 1391
VIEW_CHECKSUM = 1392
VIEW_MULTIUPDATE = 1393
VIEW_NO_INSERT_FIELD_LIST = 1394
VIEW_DELETE_MERGE_VIEW = 1395
CANNOT_USER = 1396
XAER_NOTA = 1397
XAER_INVAL = 1398
XAER_RMFAIL = 1399
XAER_OUTSIDE = 1400
XAER_RMERR = 1401
XA_RBROLLBACK = 1402
NONEXISTING_PROC_GRANT = 1403
PROC_AUTO_GRANT_FAIL = 1404
PROC_AUTO_REVOKE_FAIL = 1405
DATA_TOO_LONG = 1406
SP_BAD_SQLSTATE = 1407
STARTUP = 1408
LOAD_FROM_FIXED_SIZE_ROWS_TO_VAR = 1409
CANT_CREATE_USER_WITH_GRANT = 1410
WRONG_VALUE_FOR_TYPE = 1411
TABLE_DEF_CHANGED = 1412
SP_DUP_HANDLER = 1413
SP_NOT_VAR_ARG = 1414
SP_NO_RETSET = 1415
CANT_CREATE_GEOMETRY_OBJECT = 1416
FAILED_ROUTINE_BREAK_BINLOG = 1417
BINLOG_UNSAFE_ROUTINE = 1418
BINLOG_CREATE_ROUTINE_NEED_SUPER = 1419
EXEC_STMT_WITH_OPEN_CURSOR = 1420
STMT_HAS_NO_OPEN_CURSOR = 1421
COMMIT_NOT_ALLOWED_IN_SF_OR_TRG = 1422
NO_DEFAULT_FOR_VIEW_FIELD = 1423
SP_NO_RECURSION = 1424
TOO_BIG_SCALE = 1425
TOO_BIG_PRECISION = 1426
M_BIGGER_THAN_D = 1427
WRONG_LOCK_OF_SYSTEM_TABLE = 1428
CONNECT_TO_FOREIGN_DATA_SOURCE = 1429
QUERY_ON_FOREIGN_DATA_SOURCE = 1430
FOREIGN_DATA_SOURCE_DOESNT_EXIST = 1431
FOREIGN_DATA_STRING_INVALID_CANT_CREATE = 1432
FOREIGN_DATA_STRING_INVALID = 1433
CANT_CREATE_FEDERATED_TABLE = 1434
TRG_IN_WRONG_SCHEMA = 1435
STACK_OVERRUN_NEED_MORE = 1436
TOO_LONG_BODY = 1437
WARN_CANT_DROP_DEFAULT_KEYCACHE = 1438
TOO_BIG_DISPLAYWIDTH = 1439
XAER_DUPID = 1440
DATETIME_FUNCTION_OVERFLOW = 1441
CANT_UPDATE_USED_TABLE_IN_SF_OR_TRG = 1442
VIEW_PREVENT_UPDATE = 1443
PS_NO_RECURSION = 1444
SP_CANT_SET_AUTOCOMMIT = 1445
MALFORMED_DEFINER = 1446
VIEW_FRM_NO_USER = 1447
VIEW_OTHER_USER = 1448
NO_SUCH_USER = 1449
FORBID_SCHEMA_CHANGE = 1450
ROW_IS_REFERENCED_2 = 1451
NO_REFERENCED_ROW_2 = 1452
SP_BAD_VAR_SHADOW = 1453
TRG_NO_DEFINER = 1454
OLD_FILE_FORMAT = 1455
SP_RECURSION_LIMIT = 1456
SP_PROC_TABLE_CORRUPT = 1457
SP_WRONG_NAME = 1458
TABLE_NEEDS_UPGRADE = 1459
SP_NO_AGGREGATE = 1460
MAX_PREPARED_STMT_COUNT_REACHED = 1461
VIEW_RECURSIVE = 1462
NON_GROUPING_FIELD_USED = 1463
TABLE_CANT_HANDLE_SPKEYS = 1464
NO_TRIGGERS_ON_SYSTEM_SCHEMA = 1465
USERNAME = 1466
HOSTNAME = 1467
WRONG_STRING_LENGTH = 1468
ERROR_LAST = 1468
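# Hedged sketch (an addition): building a reverse code -> name lookup for
# error reporting. Where two names share a code (e.g. ERROR_FIRST and
# HASHCHK are both 1000), whichever name globals() yields last wins, so
# treat the result as approximate.
_example_error_names = dict(
    (value, name) for name, value in globals().items()
    if isinstance(value, int) and not name.startswith('_'))
# e.g. _example_error_names.get(1062) -> 'DUP_ENTRY'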
| Python |
SERVER_STATUS_IN_TRANS = 1
SERVER_STATUS_AUTOCOMMIT = 2
SERVER_MORE_RESULTS_EXISTS = 8
SERVER_QUERY_NO_GOOD_INDEX_USED = 16
SERVER_QUERY_NO_INDEX_USED = 32
SERVER_STATUS_CURSOR_EXISTS = 64
SERVER_STATUS_LAST_ROW_SENT = 128
SERVER_STATUS_DB_DROPPED = 256
SERVER_STATUS_NO_BACKSLASH_ESCAPES = 512
SERVER_STATUS_METADATA_CHANGED = 1024
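# Illustrative check (an addition): MySQLResult.server_status can be tested
# against these bits, e.g. to tell whether another result set follows a
# multi-statement query.
def _example_has_more_results(server_status):
    return bool(server_status & SERVER_MORE_RESULTS_EXISTS)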
| Python |
LONG_PASSWORD = 1
FOUND_ROWS = 1 << 1
LONG_FLAG = 1 << 2
CONNECT_WITH_DB = 1 << 3
NO_SCHEMA = 1 << 4
COMPRESS = 1 << 5
ODBC = 1 << 6
LOCAL_FILES = 1 << 7
IGNORE_SPACE = 1 << 8
PROTOCOL_41 = 1 << 9
INTERACTIVE = 1 << 10
SSL = 1 << 11
IGNORE_SIGPIPE = 1 << 12
TRANSACTIONS = 1 << 13
SECURE_CONNECTION = 1 << 15
MULTI_STATEMENTS = 1 << 16
MULTI_RESULTS = 1 << 17
CAPABILITIES = LONG_PASSWORD|LONG_FLAG|TRANSACTIONS| \
PROTOCOL_41|SECURE_CONNECTION
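# Hedged sketch (an addition, not upstream code): a client that also wants
# to select a database during the handshake would OR in CONNECT_WITH_DB,
# mirroring how Connection._send_authentication() ORs MULTI_RESULTS into
# client_flag.
_example_client_flag = CAPABILITIES | CONNECT_WITH_DB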
| Python |
#!/usr/bin/env python
"""
client module for memcached (memory cache daemon)
Overview
========
See U{the MemCached homepage<http://www.danga.com/memcached>} for more about memcached.
Usage summary
=============
This should give you a feel for how this module operates::
import memcache
mc = memcache.Client(['127.0.0.1:11211'], debug=0)
mc.set("some_key", "Some value")
value = mc.get("some_key")
mc.set("another_key", 3)
mc.delete("another_key")
mc.set("key", "1") # note that the key used for incr/decr must be a string.
mc.incr("key")
mc.decr("key")
The standard way to use memcache with a database is like this::
key = derive_key(obj)
obj = mc.get(key)
if not obj:
obj = backend_api.get(...)
mc.set(key, obj)
# we now have obj, and future passes through this code
# will use the object from the cache.
Detailed Documentation
======================
More detailed documentation is available in the L{Client} class.
"""
import sys
import socket
import time
import os
import re
try:
import cPickle as pickle
except ImportError:
import pickle
from binascii import crc32 # zlib version is not cross-platform
def cmemcache_hash(key):
return((((crc32(key) & 0xffffffff) >> 16) & 0x7fff) or 1)
serverHashFunction = cmemcache_hash
def useOldServerHashFunction():
"""Use the old python-memcache server hash function."""
global serverHashFunction
serverHashFunction = crc32
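# Illustrative sketch (an editorial addition, not part of this module): how
# a key hash picks a server, mirroring the modulo step in
# Client._get_server() further down (minus its dead-server retries).
# `buckets` is the weighted server list built by Client._init_buckets().
def _example_bucket_for(key, buckets):
    return buckets[serverHashFunction(key) % len(buckets)]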
try:
from zlib import compress, decompress
_supports_compress = True
except ImportError:
_supports_compress = False
# quickly define a decompress just in case we recv compressed data.
def decompress(val):
raise _Error("received compressed data but I don't support compression (import error)")
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
# Original author: Evan Martin of Danga Interactive
__author__ = "Sean Reifschneider <jafo-memcached@tummy.com>"
__version__ = "1.48"
__copyright__ = "Copyright (C) 2003 Danga Interactive"
# http://en.wikipedia.org/wiki/Python_Software_Foundation_License
__license__ = "Python Software Foundation License"
SERVER_MAX_KEY_LENGTH = 250
# Storing values larger than 1MB requires recompiling memcached. If you do,
# this value can be changed by doing "memcache.SERVER_MAX_VALUE_LENGTH = N"
# after importing this module.
SERVER_MAX_VALUE_LENGTH = 1024*1024
class _Error(Exception):
pass
class _ConnectionDeadError(Exception):
pass
try:
# Only exists in Python 2.4+
from threading import local
except ImportError:
# TODO: add the pure-python local implementation
class local(object):
pass
_DEAD_RETRY = 30 # number of seconds before retrying a dead server.
_SOCKET_TIMEOUT = 3 # number of seconds before sockets timeout.
class Client(local):
"""
Object representing a pool of memcache servers.
See L{memcache} for an overview.
In all cases where a key is used, the key can be either:
1. A simple hashable type (string, integer, etc.).
2. A tuple of C{(hashvalue, key)}. This is useful if you want to avoid
making this module calculate a hash value. You may prefer, for
example, to keep all of a given user's objects on the same memcache
server, so you could use the user's unique id as the hash value.
@group Setup: __init__, set_servers, forget_dead_hosts, disconnect_all, debuglog
@group Insertion: set, add, replace, set_multi
@group Retrieval: get, get_multi
@group Integers: incr, decr
@group Removal: delete, delete_multi
@sort: __init__, set_servers, forget_dead_hosts, disconnect_all, debuglog,\
set, set_multi, add, replace, get, get_multi, incr, decr, delete, delete_multi
"""
_FLAG_PICKLE = 1<<0
_FLAG_INTEGER = 1<<1
_FLAG_LONG = 1<<2
_FLAG_COMPRESSED = 1<<3
_SERVER_RETRIES = 10 # how many times to try finding a free server.
# exceptions for Client
class MemcachedKeyError(Exception):
pass
class MemcachedKeyLengthError(MemcachedKeyError):
pass
class MemcachedKeyCharacterError(MemcachedKeyError):
pass
class MemcachedKeyNoneError(MemcachedKeyError):
pass
class MemcachedKeyTypeError(MemcachedKeyError):
pass
class MemcachedStringEncodingError(Exception):
pass
def __init__(self, servers, debug=0, pickleProtocol=0,
pickler=pickle.Pickler, unpickler=pickle.Unpickler,
pload=None, pid=None,
server_max_key_length=SERVER_MAX_KEY_LENGTH,
server_max_value_length=SERVER_MAX_VALUE_LENGTH,
dead_retry=_DEAD_RETRY, socket_timeout=_SOCKET_TIMEOUT,
cache_cas = False):
"""
Create a new Client object with the given list of servers.
@param servers: C{servers} is passed to L{set_servers}.
@param debug: whether to display error messages when a server can't be
contacted.
@param pickleProtocol: number to mandate protocol used by (c)Pickle.
@param pickler: optional override of default Pickler to allow subclassing.
@param unpickler: optional override of default Unpickler to allow subclassing.
@param pload: optional persistent_load function to call on pickle loading.
Useful for cPickle since subclassing isn't allowed.
@param pid: optional persistent_id function to call on pickle storing.
Useful for cPickle since subclassing isn't allowed.
@param dead_retry: number of seconds before retrying a blacklisted
server. Defaults to 30 seconds.
@param socket_timeout: timeout in seconds for all calls to a server. Defaults
to 3 seconds.
@param cache_cas: (default False) If true, cas operations will be
cached. WARNING: This cache is not expired internally; if you have
a long-running process you will need to expire it manually via
"client.reset_cas()", or the cache can grow without bound.
@param server_max_key_length: (default SERVER_MAX_KEY_LENGTH)
Data that is larger than this will not be sent to the server.
@param server_max_value_length: (default SERVER_MAX_VALUE_LENGTH)
Data that is larger than this will not be sent to the server.
"""
local.__init__(self)
self.debug = debug
self.dead_retry = dead_retry
self.socket_timeout = socket_timeout
self.set_servers(servers)
self.stats = {}
self.cache_cas = cache_cas
self.reset_cas()
# Allow users to modify pickling/unpickling behavior
self.pickleProtocol = pickleProtocol
self.pickler = pickler
self.unpickler = unpickler
self.persistent_load = pload
self.persistent_id = pid
self.server_max_key_length = server_max_key_length
self.server_max_value_length = server_max_value_length
# figure out the pickler style
file = StringIO()
try:
pickler = self.pickler(file, protocol = self.pickleProtocol)
self.picklerIsKeyword = True
except TypeError:
self.picklerIsKeyword = False
def reset_cas(self):
"""
Reset the cas cache. This is only used if the Client() object
was created with "cache_cas=True". If used, this cache does not
expire internally, so it can grow unbounded if you do not clear it
yourself.
"""
self.cas_ids = {}
def set_servers(self, servers):
"""
Set the pool of servers used by this client.
@param servers: an array of servers.
Servers can be passed in two forms:
1. Strings of the form C{"host:port"}, which implies a default weight of 1.
2. Tuples of the form C{("host:port", weight)}, where C{weight} is
an integer weight value.
"""
self.servers = [_Host(s, self.debug, dead_retry=self.dead_retry,
socket_timeout=self.socket_timeout)
for s in servers]
self._init_buckets()
def get_stats(self, stat_args = None):
'''Get statistics from each of the servers.
@param stat_args: Additional arguments to pass to the memcache
"stats" command.
@return: A list of tuples ( server_identifier, stats_dictionary ).
The dictionary contains a number of name/value pairs specifying
the name of the status field and the string value associated with
it. The values are not converted from strings.
'''
data = []
for s in self.servers:
if not s.connect(): continue
if s.family == socket.AF_INET:
name = '%s:%s (%s)' % ( s.ip, s.port, s.weight )
else:
name = 'unix:%s (%s)' % ( s.address, s.weight )
if not stat_args:
s.send_cmd('stats')
else:
s.send_cmd('stats ' + stat_args)
serverData = {}
data.append(( name, serverData ))
readline = s.readline
while 1:
line = readline()
if not line or line.strip() == 'END': break
stats = line.split(' ', 2)
serverData[stats[1]] = stats[2]
return(data)
def get_slabs(self):
data = []
for s in self.servers:
if not s.connect(): continue
if s.family == socket.AF_INET:
name = '%s:%s (%s)' % ( s.ip, s.port, s.weight )
else:
name = 'unix:%s (%s)' % ( s.address, s.weight )
serverData = {}
data.append(( name, serverData ))
s.send_cmd('stats items')
readline = s.readline
while 1:
line = readline()
if not line or line.strip() == 'END': break
item = line.split(' ', 2)
#0 = STAT, 1 = ITEM, 2 = Value
slab = item[1].split(':', 2)
#0 = items, 1 = Slab #, 2 = Name
if slab[1] not in serverData:
serverData[slab[1]] = {}
serverData[slab[1]][slab[2]] = item[2]
return data
def flush_all(self):
'Expire all data currently in the memcache servers.'
for s in self.servers:
if not s.connect(): continue
s.send_cmd('flush_all')
s.expect("OK")
def debuglog(self, str):
if self.debug:
sys.stderr.write("MemCached: %s\n" % str)
def _statlog(self, func):
if func not in self.stats:
self.stats[func] = 1
else:
self.stats[func] += 1
def forget_dead_hosts(self):
"""
Reset every host in the pool to an "alive" state.
"""
for s in self.servers:
s.deaduntil = 0
def _init_buckets(self):
self.buckets = []
for server in self.servers:
for i in range(server.weight):
self.buckets.append(server)
def _get_server(self, key):
if isinstance(key, tuple):
serverhash, key = key
else:
serverhash = serverHashFunction(key)
for i in range(Client._SERVER_RETRIES):
server = self.buckets[serverhash % len(self.buckets)]
if server.connect():
#print "(using server %s)" % server,
return server, key
serverhash = serverHashFunction(str(serverhash) + str(i))
return None, None
def disconnect_all(self):
for s in self.servers:
s.close_socket()
def delete_multi(self, keys, time=0, key_prefix=''):
'''
Delete multiple keys in the memcache doing just one query.
>>> notset_keys = mc.set_multi({'key1' : 'val1', 'key2' : 'val2'})
>>> mc.get_multi(['key1', 'key2']) == {'key1' : 'val1', 'key2' : 'val2'}
1
>>> mc.delete_multi(['key1', 'key2'])
1
>>> mc.get_multi(['key1', 'key2']) == {}
1
This method is recommended over iterated regular L{delete}s as it reduces total latency, since
your app doesn't have to wait for each round-trip of L{delete} before sending
the next one.
@param keys: An iterable of keys to clear
@param time: number of seconds any subsequent set / update commands should fail. Defaults to 0 for no delay.
@param key_prefix: Optional string to prepend to each key when sending to memcache.
See docs for L{get_multi} and L{set_multi}.
@return: 1 if no failure in communication with any memcacheds.
@rtype: int
'''
self._statlog('delete_multi')
server_keys, prefixed_to_orig_key = self._map_and_prefix_keys(keys, key_prefix)
# send out all requests on each server before reading anything
dead_servers = []
rc = 1
for server in server_keys.iterkeys():
bigcmd = []
write = bigcmd.append
if time != None:
for key in server_keys[server]: # These are mangled keys
write("delete %s %d\r\n" % (key, time))
else:
for key in server_keys[server]: # These are mangled keys
write("delete %s\r\n" % key)
try:
server.send_cmds(''.join(bigcmd))
except socket.error, msg:
rc = 0
if isinstance(msg, tuple): msg = msg[1]
server.mark_dead(msg)
dead_servers.append(server)
# if any servers died on the way, don't expect them to respond.
for server in dead_servers:
del server_keys[server]
for server, keys in server_keys.iteritems():
try:
for key in keys:
server.expect("DELETED")
except socket.error, msg:
if isinstance(msg, tuple): msg = msg[1]
server.mark_dead(msg)
rc = 0
return rc
def delete(self, key, time=0):
'''Deletes a key from the memcache.
@return: Nonzero on success.
@param time: number of seconds any subsequent set / update commands
should fail. Defaults to 0 for no delay.
@rtype: int
'''
self.check_key(key)
server, key = self._get_server(key)
if not server:
return 0
self._statlog('delete')
if time != None and time != 0:
cmd = "delete %s %d" % (key, time)
else:
cmd = "delete %s" % key
try:
server.send_cmd(cmd)
line = server.readline()
if line and line.strip() in ['DELETED', 'NOT_FOUND']: return 1
self.debuglog('Delete expected DELETED or NOT_FOUND, got: %s'
% repr(line))
except socket.error, msg:
if isinstance(msg, tuple): msg = msg[1]
server.mark_dead(msg)
return 0
def incr(self, key, delta=1):
"""
Sends a command to the server to atomically increment the value
for C{key} by C{delta}, or by 1 if C{delta} is unspecified.
Returns None if C{key} doesn't exist on server, otherwise it
returns the new value after incrementing.
Note that the value for C{key} must already exist in the memcache,
and it must be the string representation of an integer.
>>> mc.set("counter", "20") # returns 1, indicating success
1
>>> mc.incr("counter")
21
>>> mc.incr("counter")
22
Overflow on server is not checked. Be aware of values approaching
2**32. See L{decr}.
@param delta: Integer amount to increment by (should be zero or greater).
@return: New value after incrementing.
@rtype: int
"""
return self._incrdecr("incr", key, delta)
def decr(self, key, delta=1):
"""
Like L{incr}, but decrements. Unlike L{incr}, underflow is checked and
new values are capped at 0. If server value is 1, a decrement of 2
returns 0, not -1.
@param delta: Integer amount to decrement by (should be zero or greater).
@return: New value after decrementing.
@rtype: int
"""
return self._incrdecr("decr", key, delta)
def _incrdecr(self, cmd, key, delta):
self.check_key(key)
server, key = self._get_server(key)
if not server:
return 0
self._statlog(cmd)
cmd = "%s %s %d" % (cmd, key, delta)
try:
server.send_cmd(cmd)
line = server.readline()
if line == None or line.strip() =='NOT_FOUND': return None
return int(line)
except socket.error, msg:
if isinstance(msg, tuple): msg = msg[1]
server.mark_dead(msg)
return None
def add(self, key, val, time = 0, min_compress_len = 0):
'''
Add new key with value.
Like L{set}, but only stores in memcache if the key doesn't already exist.
@return: Nonzero on success.
@rtype: int
'''
return self._set("add", key, val, time, min_compress_len)
def append(self, key, val, time=0, min_compress_len=0):
'''Append the value to the end of the existing key's value.
Only stores in memcache if key already exists.
Also see L{prepend}.
@return: Nonzero on success.
@rtype: int
'''
return self._set("append", key, val, time, min_compress_len)
def prepend(self, key, val, time=0, min_compress_len=0):
'''Prepend the value to the beginning of the existing key's value.
Only stores in memcache if key already exists.
Also see L{append}.
@return: Nonzero on success.
@rtype: int
'''
return self._set("prepend", key, val, time, min_compress_len)
def replace(self, key, val, time=0, min_compress_len=0):
'''Replace existing key with value.
Like L{set}, but only stores in memcache if the key already exists.
The opposite of L{add}.
@return: Nonzero on success.
@rtype: int
'''
return self._set("replace", key, val, time, min_compress_len)
def set(self, key, val, time=0, min_compress_len=0):
'''Unconditionally sets a key to a given value in the memcache.
The C{key} can optionally be a tuple, with the first element
being the server hash value and the second being the key. Use this
form if you want to avoid making this module calculate a hash value;
you may prefer, for example, to keep all of a given user's objects
on the same memcache server, so you could use the user's unique
id as the hash value.
@return: Nonzero on success.
@rtype: int
@param time: Tells memcached the time at which this value should expire, either
as a delta number of seconds, or an absolute unix time-since-the-epoch
value. See the memcached protocol docs section "Storage Commands"
for more info on <exptime>. We default to 0 == cache forever.
@param min_compress_len: The threshold length to kick in auto-compression
of the value using the zlib.compress() routine. If the value being cached is
a string, then the length of the string is measured, else if the value is an
object, then the length of the pickle result is measured. If the resulting
attempt at compression yields a larger string than the input, then it is
discarded. For backwards compatibility, this parameter defaults to 0,
indicating don't ever try to compress.
'''
return self._set("set", key, val, time, min_compress_len)
def cas(self, key, val, time=0, min_compress_len=0):
'''Sets a key to a given value in the memcache if it hasn't been
altered since last fetched. (See L{gets}).
The C{key} can optionally be a tuple, with the first element
being the server hash value and the second being the key. Use this
form if you want to avoid making this module calculate a hash value;
you may prefer, for example, to keep all of a given user's objects
on the same memcache server, so you could use the user's unique
id as the hash value.
@return: Nonzero on success.
@rtype: int
@param time: Tells memcached the time at which this value should expire,
either as a delta number of seconds, or an absolute unix
time-since-the-epoch value. See the memcached protocol docs section
"Storage Commands" for more info on <exptime>. We default to
0 == cache forever.
@param min_compress_len: The threshold length to kick in
auto-compression of the value using the zlib.compress() routine. If
the value being cached is a string, then the length of the string is
measured, else if the value is an object, then the length of the
pickle result is measured. If the resulting attempt at compression
yields a larger string than the input, then it is discarded. For
backwards compatibility, this parameter defaults to 0, indicating
don't ever try to compress.
'''
return self._set("cas", key, val, time, min_compress_len)
def _map_and_prefix_keys(self, key_iterable, key_prefix):
"""Compute the mapping of server (_Host instance) -> list of keys to stuff onto that server, as well as the mapping of
prefixed key -> original key.
"""
# Check it just once ...
key_extra_len=len(key_prefix)
if key_prefix:
self.check_key(key_prefix)
# server (_Host) -> list of unprefixed server keys in mapping
server_keys = {}
prefixed_to_orig_key = {}
# build up a list for each server of all the keys we want.
for orig_key in key_iterable:
if isinstance(orig_key, tuple):
# Tuple of hashvalue, key ala _get_server(). Caller is essentially telling us what server to stuff this on.
# Ensure call to _get_server gets a Tuple as well.
str_orig_key = str(orig_key[1])
server, key = self._get_server((orig_key[0], key_prefix + str_orig_key)) # Gotta pre-mangle key before hashing to a server. Returns the mangled key.
else:
str_orig_key = str(orig_key) # set_multi supports int / long keys.
server, key = self._get_server(key_prefix + str_orig_key)
# Now check to make sure key length is proper ...
self.check_key(str_orig_key, key_extra_len=key_extra_len)
if not server:
continue
if server not in server_keys:
server_keys[server] = []
server_keys[server].append(key)
prefixed_to_orig_key[key] = orig_key
return (server_keys, prefixed_to_orig_key)
def set_multi(self, mapping, time=0, key_prefix='', min_compress_len=0):
'''
Sets multiple keys in the memcache doing just one query.
>>> notset_keys = mc.set_multi({'key1' : 'val1', 'key2' : 'val2'})
>>> mc.get_multi(['key1', 'key2']) == {'key1' : 'val1', 'key2' : 'val2'}
1
This method is recommended over regular L{set} as it lowers the number of
total packets flying around your network, reducing total latency, since
your app doesn't have to wait for each round-trip of L{set} before sending
the next one.
@param mapping: A dict of key/value pairs to set.
@param time: Tells memcached the time at which this value should expire, either
as a delta number of seconds, or an absolute unix time-since-the-epoch
value. See the memcached protocol docs section "Storage Commands"
for more info on <exptime>. We default to 0 == cache forever.
@param key_prefix: Optional string to prepend to each key when sending to memcache. Allows you to efficiently stuff these keys into a pseudo-namespace in memcache:
>>> notset_keys = mc.set_multi({'key1' : 'val1', 'key2' : 'val2'}, key_prefix='subspace_')
>>> len(notset_keys) == 0
True
>>> mc.get_multi(['subspace_key1', 'subspace_key2']) == {'subspace_key1' : 'val1', 'subspace_key2' : 'val2'}
True
Causes keys 'subspace_key1' and 'subspace_key2' to be set. Useful in conjunction with a higher-level layer which applies namespaces to data in memcache.
In this case, the return result would be the list of not-set original keys, with the prefix not applied.
@param min_compress_len: The threshold length to kick in auto-compression
of the value using the zlib.compress() routine. If the value being cached is
a string, then the length of the string is measured, else if the value is an
object, then the length of the pickle result is measured. If the resulting
attempt at compression yields a larger string than the input, then it is
discarded. For backwards compatibility, this parameter defaults to 0,
indicating don't ever try to compress.
@return: List of keys which failed to be stored [ memcache out of memory, etc. ].
@rtype: list
'''
self._statlog('set_multi')
server_keys, prefixed_to_orig_key = self._map_and_prefix_keys(mapping.iterkeys(), key_prefix)
# send out all requests on each server before reading anything
dead_servers = []
notstored = [] # original keys.
for server in server_keys.iterkeys():
bigcmd = []
write = bigcmd.append
try:
for key in server_keys[server]: # These are mangled keys
store_info = self._val_to_store_info(
mapping[prefixed_to_orig_key[key]],
min_compress_len)
if store_info:
write("set %s %d %d %d\r\n%s\r\n" % (key, store_info[0],
time, store_info[1], store_info[2]))
else:
notstored.append(prefixed_to_orig_key[key])
server.send_cmds(''.join(bigcmd))
except socket.error, msg:
if isinstance(msg, tuple): msg = msg[1]
server.mark_dead(msg)
dead_servers.append(server)
# if any servers died on the way, don't expect them to respond.
for server in dead_servers:
del server_keys[server]
# short-circuit if there are no servers, just return all keys
if not server_keys: return(mapping.keys())
for server, keys in server_keys.iteritems():
try:
for key in keys:
line = server.readline()
if line == 'STORED':
continue
else:
notstored.append(prefixed_to_orig_key[key]) #un-mangle.
except (_Error, socket.error), msg:
if isinstance(msg, tuple): msg = msg[1]
server.mark_dead(msg)
return notstored
def _val_to_store_info(self, val, min_compress_len):
"""
Transform val to a storable representation, returning a tuple of the flags, the length of the new value, and the new value itself.
"""
flags = 0
if isinstance(val, str):
pass
elif isinstance(val, int):
flags |= Client._FLAG_INTEGER
val = "%d" % val
# force no attempt to compress this silly string.
min_compress_len = 0
elif isinstance(val, long):
flags |= Client._FLAG_LONG
val = "%d" % val
# force no attempt to compress this silly string.
min_compress_len = 0
else:
flags |= Client._FLAG_PICKLE
file = StringIO()
if self.picklerIsKeyword:
pickler = self.pickler(file, protocol = self.pickleProtocol)
else:
pickler = self.pickler(file, self.pickleProtocol)
if self.persistent_id:
pickler.persistent_id = self.persistent_id
pickler.dump(val)
val = file.getvalue()
lv = len(val)
# We should try to compress if min_compress_len > 0 and we could
# import zlib and this string is longer than our min threshold.
if min_compress_len and _supports_compress and lv > min_compress_len:
comp_val = compress(val)
# Only retain the result if the compression result is smaller
# than the original.
if len(comp_val) < lv:
flags |= Client._FLAG_COMPRESSED
val = comp_val
# silently do not store if value length exceeds maximum
if self.server_max_value_length != 0 and \
len(val) > self.server_max_value_length: return(0)
return (flags, len(val), val)
def _set(self, cmd, key, val, time, min_compress_len = 0):
self.check_key(key)
server, key = self._get_server(key)
if not server:
return 0
def _unsafe_set():
self._statlog(cmd)
store_info = self._val_to_store_info(val, min_compress_len)
if not store_info: return(0)
if cmd == 'cas':
if key not in self.cas_ids:
return self._set('set', key, val, time, min_compress_len)
fullcmd = "%s %s %d %d %d %d\r\n%s" % (
cmd, key, store_info[0], time, store_info[1],
self.cas_ids[key], store_info[2])
else:
fullcmd = "%s %s %d %d %d\r\n%s" % (
cmd, key, store_info[0], time, store_info[1], store_info[2])
try:
server.send_cmd(fullcmd)
return(server.expect("STORED") == "STORED")
except socket.error, msg:
if isinstance(msg, tuple): msg = msg[1]
server.mark_dead(msg)
return 0
try:
return _unsafe_set()
except _ConnectionDeadError:
# retry once
try:
server._get_socket()
return _unsafe_set()
except (_ConnectionDeadError, socket.error), msg:
server.mark_dead(msg)
return 0
def _get(self, cmd, key):
self.check_key(key)
server, key = self._get_server(key)
if not server:
return None
def _unsafe_get():
self._statlog(cmd)
try:
server.send_cmd("%s %s" % (cmd, key))
rkey = flags = rlen = cas_id = None
if cmd == 'gets':
rkey, flags, rlen, cas_id, = self._expect_cas_value(server)
if rkey and self.cache_cas:
self.cas_ids[rkey] = cas_id
else:
rkey, flags, rlen, = self._expectvalue(server)
if not rkey:
return None
try:
value = self._recv_value(server, flags, rlen)
finally:
server.expect("END")
except (_Error, socket.error), msg:
if isinstance(msg, tuple): msg = msg[1]
server.mark_dead(msg)
return None
return value
try:
return _unsafe_get()
except _ConnectionDeadError:
# retry once
try:
if server.connect():
return _unsafe_get()
return None
except (_ConnectionDeadError, socket.error), msg:
server.mark_dead(msg)
return None
def get(self, key):
'''Retrieves a key from the memcache.
@return: The value or None.
'''
return self._get('get', key)
def gets(self, key):
'''Retrieves a key from the memcache. Used in conjunction with 'cas'.
@return: The value or None.
'''
return self._get('gets', key)
def get_multi(self, keys, key_prefix=''):
'''
Retrieves multiple keys from the memcache doing just one query.
>>> success = mc.set("foo", "bar")
>>> success = mc.set("baz", 42)
>>> mc.get_multi(["foo", "baz", "foobar"]) == {"foo": "bar", "baz": 42}
1
>>> mc.set_multi({'k1' : 1, 'k2' : 2}, key_prefix='pfx_') == []
1
This looks up keys 'pfx_k1', 'pfx_k2', ... . Returned dict will just have unprefixed keys 'k1', 'k2'.
>>> mc.get_multi(['k1', 'k2', 'nonexist'], key_prefix='pfx_') == {'k1' : 1, 'k2' : 2}
1
get_multi [ and L{set_multi} ] can take str()-able types like ints / longs as keys too, such as your db primary key fields.
They're run through str() before being passed off to memcache, with or without the use of a key_prefix.
In this mode, the key_prefix could be a table name, and the key itself a db primary key number.
>>> mc.set_multi({42: 'douglass adams', 46 : 'and 2 just ahead of me'}, key_prefix='numkeys_') == []
1
>>> mc.get_multi([46, 42], key_prefix='numkeys_') == {42: 'douglass adams', 46 : 'and 2 just ahead of me'}
1
This method is recommended over regular L{get} as it lowers the number of
total packets flying around your network, reducing total latency, since
your app doesn't have to wait for each round-trip of L{get} before sending
the next one.
See also L{set_multi}.
@param keys: An array of keys.
@param key_prefix: A string to prefix each key when we communicate with memcache.
Facilitates pseudo-namespaces within memcache. Returned dictionary keys will not have this prefix.
@return: A dictionary of key/value pairs that were available. If key_prefix was provided, the keys in the returned dictionary will not have it present.
'''
self._statlog('get_multi')
server_keys, prefixed_to_orig_key = self._map_and_prefix_keys(keys, key_prefix)
# send out all requests on each server before reading anything
dead_servers = []
for server in server_keys.iterkeys():
try:
server.send_cmd("get %s" % " ".join(server_keys[server]))
except socket.error, msg:
if isinstance(msg, tuple): msg = msg[1]
server.mark_dead(msg)
dead_servers.append(server)
# if any servers died on the way, don't expect them to respond.
for server in dead_servers:
del server_keys[server]
retvals = {}
for server in server_keys.iterkeys():
try:
line = server.readline()
while line and line != 'END':
rkey, flags, rlen = self._expectvalue(server, line)
# Bo Yang reports that this can sometimes be None
if rkey is not None:
val = self._recv_value(server, flags, rlen)
retvals[prefixed_to_orig_key[rkey]] = val # un-prefix returned key.
line = server.readline()
except (_Error, socket.error), msg:
if isinstance(msg, tuple): msg = msg[1]
server.mark_dead(msg)
return retvals
def _expect_cas_value(self, server, line=None):
if not line:
line = server.readline()
if line and line[:5] == 'VALUE':
resp, rkey, flags, length, cas_id = line.split()
return (rkey, int(flags), int(length), int(cas_id))
else:
return (None, None, None, None)
def _expectvalue(self, server, line=None):
if not line:
line = server.readline()
if line and line[:5] == 'VALUE':
resp, rkey, flags, length = line.split()
flags = int(flags)
rlen = int(length)
return (rkey, flags, rlen)
else:
return (None, None, None)
def _recv_value(self, server, flags, rlen):
rlen += 2 # include \r\n
buf = server.recv(rlen)
if len(buf) != rlen:
raise _Error("received %d bytes when expecting %d"
% (len(buf), rlen))
if len(buf) == rlen:
buf = buf[:-2] # strip \r\n
if flags & Client._FLAG_COMPRESSED:
buf = decompress(buf)
if flags == 0 or flags == Client._FLAG_COMPRESSED:
# Either a bare string or a compressed string now decompressed...
val = buf
elif flags & Client._FLAG_INTEGER:
val = int(buf)
elif flags & Client._FLAG_LONG:
val = long(buf)
elif flags & Client._FLAG_PICKLE:
try:
file = StringIO(buf)
unpickler = self.unpickler(file)
if self.persistent_load:
unpickler.persistent_load = self.persistent_load
val = unpickler.load()
except Exception, e:
self.debuglog('Pickle error: %s\n' % e)
return None
else:
self.debuglog("unknown flags on get: %x\n" % flags)
return val
def check_key(self, key, key_extra_len=0):
"""Checks sanity of key. Fails if:
Key length is > SERVER_MAX_KEY_LENGTH (Raises MemcachedKeyLength).
Contains control characters (Raises MemcachedKeyCharacterError).
Is not a string (Raises MemcachedStringEncodingError)
Is an unicode string (Raises MemcachedStringEncodingError)
Is not a string (Raises MemcachedKeyError)
Is None (Raises MemcachedKeyError)
"""
if isinstance(key, tuple): key = key[1]
if not key:
raise Client.MemcachedKeyNoneError("Key is None")
if isinstance(key, unicode):
raise Client.MemcachedStringEncodingError(
"Keys must be str()'s, not unicode. Convert your unicode "
"strings using mystring.encode(charset)!")
if not isinstance(key, str):
raise Client.MemcachedKeyTypeError("Key must be str()'s")
if isinstance(key, basestring):
if self.server_max_key_length != 0 and \
len(key) + key_extra_len > self.server_max_key_length:
raise Client.MemcachedKeyLengthError("Key length is > %s"
% self.server_max_key_length)
for char in key:
if ord(char) < 33 or ord(char) == 127:
raise Client.MemcachedKeyCharacterError(
"Control characters not allowed")
class _Host(object):
def __init__(self, host, debug=0, dead_retry=_DEAD_RETRY,
socket_timeout=_SOCKET_TIMEOUT):
self.dead_retry = dead_retry
self.socket_timeout = socket_timeout
self.debug = debug
if isinstance(host, tuple):
host, self.weight = host
else:
self.weight = 1
# parse the connection string
m = re.match(r'^(?P<proto>unix):(?P<path>.*)$', host)
if not m:
m = re.match(r'^(?P<proto>inet):'
r'(?P<host>[^:]+)(:(?P<port>[0-9]+))?$', host)
if not m: m = re.match(r'^(?P<host>[^:]+)(:(?P<port>[0-9]+))?$', host)
if not m:
raise ValueError('Unable to parse connection string: "%s"' % host)
hostData = m.groupdict()
if hostData.get('proto') == 'unix':
self.family = socket.AF_UNIX
self.address = hostData['path']
else:
self.family = socket.AF_INET
self.ip = hostData['host']
# groupdict() maps an unmatched port group to None, so 'or' is needed
# for the default to actually apply
self.port = int(hostData.get('port') or 11211)
self.address = ( self.ip, self.port )
self.deaduntil = 0
self.socket = None
self.buffer = ''
def debuglog(self, str):
if self.debug:
sys.stderr.write("MemCached: %s\n" % str)
def _check_dead(self):
if self.deaduntil and self.deaduntil > time.time():
return 1
self.deaduntil = 0
return 0
def connect(self):
if self._get_socket():
return 1
return 0
def mark_dead(self, reason):
self.debuglog("MemCache: %s: %s. Marking dead." % (self, reason))
self.deaduntil = time.time() + self.dead_retry
self.close_socket()
def _get_socket(self):
if self._check_dead():
return None
if self.socket:
return self.socket
s = socket.socket(self.family, socket.SOCK_STREAM)
if hasattr(s, 'settimeout'): s.settimeout(self.socket_timeout)
try:
s.connect(self.address)
except socket.timeout, msg:
self.mark_dead("connect: %s" % msg)
return None
except socket.error, msg:
if isinstance(msg, tuple): msg = msg[1]
self.mark_dead("connect: %s" % msg[1])
return None
self.socket = s
self.buffer = ''
return s
def close_socket(self):
if self.socket:
self.socket.close()
self.socket = None
def send_cmd(self, cmd):
self.socket.sendall(cmd + '\r\n')
def send_cmds(self, cmds):
""" cmds already has trailing \r\n's applied """
self.socket.sendall(cmds)
def readline(self):
buf = self.buffer
recv = self.socket.recv
while True:
index = buf.find('\r\n')
if index >= 0:
break
data = recv(4096)
if not data:
# connection close, let's kill it and raise
self.close_socket()
raise _ConnectionDeadError()
buf += data
self.buffer = buf[index+2:]
return buf[:index]
def expect(self, text):
line = self.readline()
if line != text:
self.debuglog("while expecting '%s', got unexpected response '%s'"
% (text, line))
return line
def recv(self, rlen):
self_socket_recv = self.socket.recv
buf = self.buffer
while len(buf) < rlen:
foo = self_socket_recv(max(rlen - len(buf), 4096))
buf += foo
if not foo:
raise _Error( 'Read %d bytes, expecting %d, '
'read returned 0 length bytes' % ( len(buf), rlen ))
self.buffer = buf[rlen:]
return buf[:rlen]
def __str__(self):
d = ''
if self.deaduntil:
d = " (dead until %d)" % self.deaduntil
if self.family == socket.AF_INET:
return "inet:%s:%d%s" % (self.address[0], self.address[1], d)
else:
return "unix:%s%s" % (self.address, d)
def _doctest():
import doctest, memcache
servers = ["127.0.0.1:11211"]
mc = Client(servers, debug=1)
globs = {"mc": mc}
return doctest.testmod(memcache, globs=globs)
if __name__ == "__main__":
failures = 0
print "Testing docstrings..."
_doctest()
print "Running tests:"
print
serverList = [["127.0.0.1:11211"]]
if '--do-unix' in sys.argv:
serverList.append([os.path.join(os.getcwd(), 'memcached.socket')])
for servers in serverList:
mc = Client(servers, debug=1)
def to_s(val):
if not isinstance(val, basestring):
return "%s (%s)" % (val, type(val))
return "%s" % val
def test_setget(key, val):
global failures
print "Testing set/get {'%s': %s} ..." % (to_s(key), to_s(val)),
mc.set(key, val)
newval = mc.get(key)
if newval == val:
print "OK"
return 1
else:
print "FAIL"; failures = failures + 1
return 0
class FooStruct(object):
def __init__(self):
self.bar = "baz"
def __str__(self):
return "A FooStruct"
def __eq__(self, other):
if isinstance(other, FooStruct):
return self.bar == other.bar
return 0
test_setget("a_string", "some random string")
test_setget("an_integer", 42)
if test_setget("long", long(1<<30)):
print "Testing delete ...",
if mc.delete("long"):
print "OK"
else:
print "FAIL"; failures = failures + 1
print "Checking results of delete ..."
if mc.get("long") == None:
print "OK"
else:
print "FAIL"; failures = failures + 1
print "Testing get_multi ...",
print mc.get_multi(["a_string", "an_integer"])
# removed from the protocol
#if test_setget("timed_delete", 'foo'):
# print "Testing timed delete ...",
# if mc.delete("timed_delete", 1):
# print "OK"
# else:
# print "FAIL"; failures = failures + 1
# print "Checking results of timed delete ..."
# if mc.get("timed_delete") == None:
# print "OK"
# else:
# print "FAIL"; failures = failures + 1
print "Testing get(unknown value) ...",
print to_s(mc.get("unknown_value"))
f = FooStruct()
test_setget("foostruct", f)
print "Testing incr ...",
x = mc.incr("an_integer", 1)
if x == 43:
print "OK"
else:
print "FAIL"; failures = failures + 1
print "Testing decr ...",
x = mc.decr("an_integer", 1)
if x == 42:
print "OK"
else:
print "FAIL"; failures = failures + 1
sys.stdout.flush()
# sanity tests
print "Testing sending spaces...",
sys.stdout.flush()
try:
x = mc.set("this has spaces", 1)
except Client.MemcachedKeyCharacterError, msg:
print "OK"
else:
print "FAIL"; failures = failures + 1
print "Testing sending control characters...",
try:
x = mc.set("this\x10has\x11control characters\x02", 1)
except Client.MemcachedKeyCharacterError, msg:
print "OK"
else:
print "FAIL"; failures = failures + 1
print "Testing using insanely long key...",
try:
x = mc.set('a'*SERVER_MAX_KEY_LENGTH, 1)
except Client.MemcachedKeyLengthError, msg:
print "FAIL"; failures = failures + 1
else:
print "OK"
try:
x = mc.set('a'*SERVER_MAX_KEY_LENGTH + 'a', 1)
except Client.MemcachedKeyLengthError, msg:
print "OK"
else:
print "FAIL"; failures = failures + 1
print "Testing sending a unicode-string key...",
try:
x = mc.set(u'keyhere', 1)
except Client.MemcachedStringEncodingError, msg:
print "OK",
else:
print "FAIL",; failures = failures + 1
try:
x = mc.set((u'a'*SERVER_MAX_KEY_LENGTH).encode('utf-8'), 1)
except:
print "FAIL",; failures = failures + 1
else:
print "OK",
import pickle
s = pickle.loads('V\\u4f1a\np0\n.')
try:
x = mc.set((s*SERVER_MAX_KEY_LENGTH).encode('utf-8'), 1)
except Client.MemcachedKeyLengthError:
print "OK"
else:
print "FAIL"; failures = failures + 1
print "Testing using a value larger than the memcached value limit...",
x = mc.set('keyhere', 'a'*SERVER_MAX_VALUE_LENGTH)
if mc.get('keyhere') == None:
print "OK",
else:
print "FAIL",; failures = failures + 1
x = mc.set('keyhere', 'a'*SERVER_MAX_VALUE_LENGTH + 'aaa')
if mc.get('keyhere') == None:
print "OK"
else:
print "FAIL"; failures = failures + 1
print "Testing set_multi() with no memcacheds running",
mc.disconnect_all()
errors = mc.set_multi({'keyhere' : 'a', 'keythere' : 'b'})
if errors != []:
print "FAIL"; failures = failures + 1
else:
print "OK"
print "Testing delete_multi() with no memcacheds running",
mc.disconnect_all()
ret = mc.delete_multi({'keyhere' : 'a', 'keythere' : 'b'})
if ret != 1:
print "FAIL"; failures = failures + 1
else:
print "OK"
if failures > 0:
print '*** THERE WERE FAILED TESTS'
sys.exit(1)
sys.exit(0)
# vim: ts=4 sw=4 et :
| Python |
from gluon.contrib.memcache.memcache import Client
from gluon.cache import CacheAbstract
import time
"""
example of usage:
cache.memcache = MemcacheClient(request,['127.0.0.1:11211'],debug=True)
"""
import cPickle as pickle
import thread
from gluon import current
DEFAULT_TIME_EXPIRE = 300 # seconds (must be the same as cache.ram)
def MemcacheClient(*a, **b):
if not hasattr(current, '__memcache_client'):
current.__memcache_client = MemcacheClientObj(*a, **b)
return current.__memcache_client
class MemcacheClientObj(Client):
meta_storage = {}
max_time_expire = 24*3600
def __init__(self, request, servers, debug=0, pickleProtocol=0,
pickler=pickle.Pickler, unpickler=pickle.Unpickler,
pload=None, pid=None,
default_time_expire = DEFAULT_TIME_EXPIRE):
self.request=request
self.default_time_expire = default_time_expire
if request:
app = request.application
else:
app = ''
Client.__init__(self, servers, debug, pickleProtocol,
pickler, unpickler, pload, pid)
if not app in self.meta_storage:
self.storage = self.meta_storage[app] = {
CacheAbstract.cache_stats_name: {
'hit_total': 0,
'misses': 0,
}}
else:
self.storage = self.meta_storage[app]
def __call__(self, key, f, time_expire = 'default'):
if time_expire == 'default':
time_expire = self.default_time_expire
if time_expire == None:
time_expire = self.max_time_expire
# this must be commented because get and set are redefined
# key = self.__keyFormat__(key)
now = time.time()
value = None
if f is None: # force deletion of value
self.delete(key)
return None
elif time_expire==0: # value forced expired
item = None # value to be computed
else:
item = self.get(key)
if item:
if not isinstance(item,(list,tuple)):
value = item
elif (item[0] < now - time_expire): # value expired
item = None # value to be computed
else:
value = item[1]
if not item:
value = f()
self.set(key, (now,value), self.max_time_expire)
return value
def increment(self, key, value=1, time_expire='default'):
""" time_expire is ignored """
if time_expire == 'default':
time_expire = self.default_time_expire
newKey = self.__keyFormat__(key)
obj = Client.get(self, newKey)
if obj:
if isinstance(obj,(int,float,long)):
return Client.incr(self, newKey, value)
else:
value += obj[1]
Client.set(self,newKey,(time.time(),value),
self.max_time_expire)
return value
else:
Client.set(self, newKey, value, self.max_time_expire)
return value
def set(self, key, value, time_expire='default'):
if time_expire == 'default':
time_expire = self.default_time_expire
newKey = self.__keyFormat__(key)
return Client.set(self, newKey, value, time_expire)
def get(self, key):
newKey = self.__keyFormat__(key)
return Client.get(self, newKey)
def delete(self, key):
newKey = self.__keyFormat__(key)
return Client.delete(self, newKey)
def __keyFormat__(self, key):
return '%s/%s' % (self.request.application, key.replace(' ', '_'))
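# Illustrative usage in a web2py model (a sketch; the server address, key
# names, and the 60-second expiry below are examples, not part of this module):
#
# cache.memcache = MemcacheClient(request, ['127.0.0.1:11211'])
# def slow():
#     return expensive_computation()                     # hypothetical helper
# value = cache.memcache('mykey', slow, time_expire=60)  # cache for 60s
# cache.memcache('mykey', None)                          # force deletion
# cache.memcache.increment('counter')                    # counter helper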
| Python |
# (c) 2007 Chris AtLee <chris@atlee.ca>
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license.php
"""
PAM module for python
Provides an authenticate function that will allow the caller to authenticate
a user against the Pluggable Authentication Modules (PAM) on the system.
Implemented using ctypes, so no compilation is necessary.
"""
__all__ = ['authenticate']
from ctypes import CDLL, POINTER, Structure, CFUNCTYPE, cast, pointer, sizeof
from ctypes import c_void_p, c_uint, c_char_p, c_char, c_int
from ctypes.util import find_library
LIBPAM = CDLL(find_library("pam"))
LIBC = CDLL(find_library("c"))
CALLOC = LIBC.calloc
CALLOC.restype = c_void_p
CALLOC.argtypes = [c_uint, c_uint]
STRDUP = LIBC.strdup
STRDUP.argtypes = [c_char_p]
STRDUP.restype = POINTER(c_char) # NOT c_char_p !!!!
# Various constants
PAM_PROMPT_ECHO_OFF = 1
PAM_PROMPT_ECHO_ON = 2
PAM_ERROR_MSG = 3
PAM_TEXT_INFO = 4
class PamHandle(Structure):
"""wrapper class for pam_handle_t"""
_fields_ = [
("handle", c_void_p)
]
def __init__(self):
Structure.__init__(self)
self.handle = 0
class PamMessage(Structure):
"""wrapper class for pam_message structure"""
_fields_ = [
("msg_style", c_int),
("msg", c_char_p),
]
def __repr__(self):
return "<PamMessage %i '%s'>" % (self.msg_style, self.msg)
class PamResponse(Structure):
"""wrapper class for pam_response structure"""
_fields_ = [
("resp", c_char_p),
("resp_retcode", c_int),
]
def __repr__(self):
return "<PamResponse %i '%s'>" % (self.resp_retcode, self.resp)
CONV_FUNC = CFUNCTYPE(c_int,
c_int, POINTER(POINTER(PamMessage)),
POINTER(POINTER(PamResponse)), c_void_p)
class PamConv(Structure):
"""wrapper class for pam_conv structure"""
_fields_ = [
("conv", CONV_FUNC),
("appdata_ptr", c_void_p)
]
PAM_START = LIBPAM.pam_start
PAM_START.restype = c_int
PAM_START.argtypes = [c_char_p, c_char_p, POINTER(PamConv),
POINTER(PamHandle)]
PAM_AUTHENTICATE = LIBPAM.pam_authenticate
PAM_AUTHENTICATE.restype = c_int
PAM_AUTHENTICATE.argtypes = [PamHandle, c_int]
def authenticate(username, password, service='login'):
"""Returns True if the given username and password authenticate for the
given service. Returns False otherwise
``username``: the username to authenticate
``password``: the password in plain text
``service``: the PAM service to authenticate against.
Defaults to 'login'"""
@CONV_FUNC
def my_conv(n_messages, messages, p_response, app_data):
"""Simple conversation function that responds to any
prompt where the echo is off with the supplied password"""
# Create an array of n_messages response objects
addr = CALLOC(n_messages, sizeof(PamResponse))
p_response[0] = cast(addr, POINTER(PamResponse))
for i in range(n_messages):
if messages[i].contents.msg_style == PAM_PROMPT_ECHO_OFF:
pw_copy = STRDUP(str(password))
p_response.contents[i].resp = cast(pw_copy, c_char_p)
p_response.contents[i].resp_retcode = 0
return 0
handle = PamHandle()
conv = PamConv(my_conv, 0)
retval = PAM_START(service, username, pointer(conv), pointer(handle))
if retval != 0:
# TODO: This is not an authentication error, something
# has gone wrong starting up PAM
return False
retval = PAM_AUTHENTICATE(handle, 0)
return retval == 0
if __name__ == "__main__":
import getpass
print authenticate(getpass.getuser(), getpass.getpass())
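# Illustrative: authenticating against a specific PAM service (a sketch;
# 'sshd' is an example service name, any entry under /etc/pam.d works):
#
# if authenticate('alice', 's3cret', service='sshd'):
#     pass  # credentials accepted by PAM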
| Python |
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Modified by Massimo Di Pierro so it works with and without GAE with web2py
# the modified version of this file is still released under the original Apache license
# and it is not released under the web2py license.
#
# This should be compatible with the Apache license since it states:
# "For the purposes of this License, Derivative Works shall not include works
# that remain separable from, or merely link (or bind by name) to the interfaces of,
# the Work and Derivative Works thereof."
#
# In fact this file is Apache-licensed and it is separable from the rest of web2py.
"""
An interactive, stateful AJAX shell that runs Python code on the server.
"""
import logging
import new
import os
import cPickle
import sys
import traceback
import types
import wsgiref.handlers
import StringIO
import threading
locker = threading.RLock()
# Set to True if stack traces should be shown in the browser, etc.
_DEBUG = True
# The entity kind for shell histories. Feel free to rename to suit your app.
_HISTORY_KIND = '_Shell_History'
# Types that can't be pickled.
UNPICKLABLE_TYPES = (
types.ModuleType,
types.TypeType,
types.ClassType,
types.FunctionType,
)
# Unpicklable statements to seed new histories with.
INITIAL_UNPICKLABLES = [
'import logging',
'import os',
'import sys',
]
class History:
"""A shell history. Stores the history's globals.
Each history global is stored in one of two places:
If the global is picklable, it's stored in the parallel globals and
global_names list properties. (They're parallel lists to work around the
unfortunate fact that the datastore can't store dictionaries natively.)
If the global is not picklable (e.g. modules, classes, and functions), or if
it was created by the same statement that created an unpicklable global,
it's not stored directly. Instead, the statement is stored in the
unpicklables list property. On each request, before executing the current
statement, the unpicklable statements are evaluated to recreate the
unpicklable globals.
The unpicklable_names property stores all of the names of globals that were
added by unpicklable statements. When we pickle and store the globals after
executing a statement, we skip the ones in unpicklable_names.
Using Text instead of string is an optimization. We don't query on any of
these properties, so they don't need to be indexed.
"""
global_names = []
globals = []
unpicklable_names = []
unpicklables = []
def set_global(self, name, value):
"""Adds a global, or updates it if it already exists.
Also removes the global from the list of unpicklable names.
Args:
name: the name of the global to set or update
value: any picklable value
"""
blob = cPickle.dumps(value)
if name in self.global_names:
index = self.global_names.index(name)
self.globals[index] = blob
else:
self.global_names.append(name)
self.globals.append(blob)
self.remove_unpicklable_name(name)
def remove_global(self, name):
"""Removes a global, if it exists.
Args:
name: string, the name of the global to remove
"""
if name in self.global_names:
index = self.global_names.index(name)
del self.global_names[index]
del self.globals[index]
def globals_dict(self):
"""Returns a dictionary view of the globals.
"""
return dict((name, cPickle.loads(val))
for name, val in zip(self.global_names, self.globals))
def add_unpicklable(self, statement, names):
"""Adds a statement and list of names to the unpicklables.
Also removes the names from the globals.
Args:
statement: string, the statement that created new unpicklable global(s).
names: list of strings; the names of the globals created by the statement.
"""
self.unpicklables.append(statement)
for name in names:
self.remove_global(name)
if name not in self.unpicklable_names:
self.unpicklable_names.append(name)
def remove_unpicklable_name(self, name):
"""Removes a name from the list of unpicklable names, if it exists.
Args:
name: string, the name of the unpicklable global to remove
"""
if name in self.unpicklable_names:
self.unpicklable_names.remove(name)
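# Illustrative History round-trip (a sketch, not part of the original file;
# note the state lives in class-level lists shared by all instances):
#
# h = History()
# h.set_global('x', 42)                    # pickled into globals
# h.add_unpicklable('import os', ['os'])   # statement replayed per request
# h.globals_dict()                         # -> {'x': 42}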
def represent(obj):
"""Returns a string representing the given object's value, which should allow the
code below to determine whether the object changes over time.
"""
try:
return cPickle.dumps(obj)
except:
return repr(obj)
def run(history, statement, env={}):
"""
Evaluates a python statement in a given history and returns the result.
"""
history.unpicklables = INITIAL_UNPICKLABLES
# extract the statement to be run
if not statement:
return ''
# the python compiler doesn't like network line endings
statement = statement.replace('\r\n', '\n')
# add a couple newlines at the end of the statement. this makes
# single-line expressions such as 'class Foo: pass' evaluate happily.
statement += '\n\n'
# log and compile the statement up front
try:
logging.info('Compiling and evaluating:\n%s' % statement)
compiled = compile(statement, '<string>', 'single')
except:
return str(traceback.format_exc())
# create a dedicated module to be used as this statement's __main__
statement_module = new.module('__main__')
# use this request's __builtin__, since it changes on each request.
# this is needed for import statements, among other things.
import __builtin__
statement_module.__builtins__ = __builtin__
# the history is passed in by the caller; the original App Engine version
# re-loaded it from the datastore at this point
# swap in our custom module for __main__. then unpickle the history
# globals, run the statement, and re-pickle the history globals, all
# inside it.
old_main = sys.modules.get('__main__')
output = StringIO.StringIO()
try:
sys.modules['__main__'] = statement_module
statement_module.__name__ = '__main__'
statement_module.__dict__.update(env)
# re-evaluate the unpicklables
for code in history.unpicklables:
exec code in statement_module.__dict__
# re-initialize the globals
for name, val in history.globals_dict().items():
try:
statement_module.__dict__[name] = val
except:
msg = 'Dropping %s since it could not be unpickled.\n' % name
output.write(msg)
logging.warning(msg + traceback.format_exc())
history.remove_global(name)
# run!
old_globals = dict((key, represent(
value)) for key, value in statement_module.__dict__.items())
try:
old_stdout, old_stderr = sys.stdout, sys.stderr
try:
sys.stderr = sys.stdout = output
locker.acquire()
exec compiled in statement_module.__dict__
finally:
locker.release()
sys.stdout, sys.stderr = old_stdout, old_stderr
except:
output.write(str(traceback.format_exc()))
return output.getvalue()
# extract the new globals that this statement added
new_globals = {}
for name, val in statement_module.__dict__.items():
if name not in old_globals or represent(val) != old_globals[name]:
new_globals[name] = val
if True in [isinstance(val, UNPICKLABLE_TYPES)
for val in new_globals.values()]:
# this statement added an unpicklable global. store the statement and
# the names of all of the globals it added in the unpicklables.
history.add_unpicklable(statement, new_globals.keys())
logging.debug('Storing this statement as an unpicklable.')
else:
# this statement didn't add any unpicklables. pickle and store the
# new globals back into the datastore.
for name, val in new_globals.items():
if not name.startswith('__'):
history.set_global(name, val)
finally:
sys.modules['__main__'] = old_main
return output.getvalue()
if __name__ == '__main__':
history = History()
while True:
print run(history, raw_input('>>> ')).rstrip()
| Python |
"""
Developed by niphlod@gmail.com
"""
import redis
from redis.exceptions import ConnectionError
from gluon import current
from gluon.storage import Storage
import cPickle as pickle
import time
import re
import logging
import thread
logger = logging.getLogger("web2py.session.redis")
locker = thread.allocate_lock()
def RedisSession(*args, **vars):
"""
Usage example: put in models
from gluon.contrib.redis_session import RedisSession
sessiondb = RedisSession('localhost:6379',db=0, session_expiry=False)
session.connect(request, response, db = sessiondb)
Simple slip-in storage for session
"""
locker.acquire()
try:
instance_name = 'redis_instance_' + current.request.application
if not hasattr(RedisSession, instance_name):
setattr(RedisSession, instance_name, RedisClient(*args, **vars))
return getattr(RedisSession, instance_name)
finally:
locker.release()
class RedisClient(object):
meta_storage = {}
MAX_RETRIES = 5
RETRIES = 0
def __init__(self, server='localhost:6379', db=None, debug=False, session_expiry=False):
"""session_expiry can be an integer, in seconds, to set the default expiration
of sessions. The corresponding record will be deleted from the redis instance,
and there's virtually no need to run sessions2trash.py
"""
self.server = server
self.db = db or 0
host, port = (self.server.split(':') + ['6379'])[:2]
port = int(port)
self.debug = debug
if current and current.request:
self.app = current.request.application
else:
self.app = ''
self.r_server = redis.Redis(host=host, port=port, db=self.db)
self.tablename = None
self.session_expiry = session_expiry
def get(self, what, default):
return self.tablename
def Field(self, fieldname, type='string', length=None, default=None,
required=False, requires=None):
return None
def define_table(self, tablename, *fields, **args):
if not self.tablename:
self.tablename = MockTable(
self, self.r_server, tablename, self.session_expiry)
return self.tablename
def __getitem__(self, key):
return self.tablename
def __call__(self, where=''):
q = self.tablename.query
return q
def commit(self):
#this is only called by sessions2trash.py
pass
class MockTable(object):
def __init__(self, db, r_server, tablename, session_expiry):
self.db = db
self.r_server = r_server
self.tablename = tablename
#set the namespace for sessions of this app
self.keyprefix = 'w2p:sess:%s' % tablename.replace(
'web2py_session_', '')
#fast auto-increment id (needed for session handling)
self.serial = "%s:serial" % self.keyprefix
#index of all the session keys of this app
self.id_idx = "%s:id_idx" % self.keyprefix
#remember the session_expiry setting
self.session_expiry = session_expiry
def getserial(self):
#return an auto-increment id
return "%s" % self.r_server.incr(self.serial, 1)
def __getattr__(self, key):
if key == 'id':
#return a fake query. We need to query it just by id for normal operations
self.query = MockQuery(field='id', db=self.r_server, prefix=self.keyprefix, session_expiry=self.session_expiry)
return self.query
elif key == '_db':
#needed because of the calls in sessions2trash.py and globals.py
return self.db
def insert(self, **kwargs):
#usually kwargs would be a Storage with several keys:
#'locked', 'client_ip','created_datetime','modified_datetime'
#'unique_key', 'session_data'
#retrieve a new key
newid = self.getserial()
key = "%s:%s" % (self.keyprefix, newid)
#add it to the index
self.r_server.sadd(self.id_idx, key)
#set a hash key with the Storage
self.r_server.hmset(key, kwargs)
if self.session_expiry:
self.r_server.expire(key, self.session_expiry)
return newid
class MockQuery(object):
"""a fake Query object that supports querying by id
and listing all keys. No other operation is supported
"""
def __init__(self, field=None, db=None, prefix=None, session_expiry=False):
self.field = field
self.value = None
self.db = db
self.keyprefix = prefix
self.op = None
self.session_expiry = session_expiry
def __eq__(self, value, op='eq'):
self.value = value
self.op = op
def __gt__(self, value, op='ge'):
self.value = value
self.op = op
def select(self):
if self.op == 'eq' and self.field == 'id' and self.value:
#means that someone wants to retrieve the key self.value
rtn = self.db.hgetall("%s:%s" % (self.keyprefix, self.value))
if rtn == dict():
#return an empty resultset for a non-existing key
return []
else:
return [Storage(rtn)]
elif self.op == 'ge' and self.field == 'id' and self.value == 0:
#means that someone wants the complete list
rtn = []
id_idx = "%s:id_idx" % self.keyprefix
#find all session keys of this app
allkeys = self.db.smembers(id_idx)
for sess in allkeys:
val = self.db.hgetall(sess)
if val == dict():
if self.session_expiry:
#clean up the idx, because the key expired
self.db.srem(id_idx, sess)
continue
else:
continue
val = Storage(val)
#add a delete_record method (necessary for sessions2trash.py)
val.delete_record = RecordDeleter(
self.db, sess, self.keyprefix)
rtn.append(val)
return rtn
else:
raise Exception("Operation not supported")
def update(self, **kwargs):
#means that the session has been found and needs an update
if self.op == 'eq' and self.field == 'id' and self.value:
key = "%s:%s" % (self.keyprefix, self.value)
rtn = self.db.hmset(key, kwargs)
if self.session_expiry:
self.db.expire(key, self.session_expiry)
return rtn
class RecordDeleter(object):
"""Dumb record deleter to support sessions2trash.py"""
def __init__(self, db, key, keyprefix):
self.db, self.key, self.keyprefix = db, key, keyprefix
def __call__(self):
id_idx = "%s:id_idx" % self.keyprefix
#remove from the index
self.db.srem(id_idx, self.key)
#remove the key itself
self.db.delete(self.key)
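# Illustrative flow (a sketch; assumes a running redis server and a web2py
# request context; the table and field values below are examples):
#
# sessiondb = RedisSession('localhost:6379', session_expiry=3600)
# tbl = sessiondb.define_table('web2py_session_welcome')
# sid = tbl.insert(locked='0', client_ip='127.0.0.1')  # new redis hash
# rows = sessiondb(tbl.id == sid).select()             # fetch it back by id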
| Python |
# vim: sw=4:expandtab:foldmethod=marker
#
# Copyright (c) 2007-2009, Mathieu Fenniak
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
__author__ = "Mathieu Fenniak"
__version__ = "1.10"
import datetime
import time
import interface
import types
import threading
from errors import *
from warnings import warn
##
# The DBAPI level supported. Currently 2.0. This property is part of the
# DBAPI 2.0 specification.
apilevel = "2.0"
##
# Integer constant stating the level of thread safety the DBAPI interface
# supports. This DBAPI interface supports sharing of the module, connections,
# and cursors. This property is part of the DBAPI 2.0 specification.
threadsafety = 3
##
# String property stating the type of parameter marker formatting expected by
# the interface. This value defaults to "format". This property is part of
# the DBAPI 2.0 specification.
# <p>
# Unlike the DBAPI specification, this value is not constant. It can be
# changed to any standard paramstyle value (ie. qmark, numeric, named, format,
# and pyformat).
paramstyle = 'format' # paramstyle can be changed to any DB-API paramstyle
def convert_paramstyle(src_style, query, args):
# I don't see any way to avoid scanning the query string char by char,
# so we might as well take that careful approach and create a
# state-based scanner. We'll use int variables for the state.
# 0 -- outside quoted string
# 1 -- inside single-quote string '...'
# 2 -- inside quoted identifier "..."
# 3 -- inside escaped single-quote string, E'...'
state = 0
output_query = ""
output_args = []
if src_style == "numeric":
output_args = args
elif src_style in ("pyformat", "named"):
mapping_to_idx = {}
i = 0
while 1:
if i == len(query):
break
c = query[i]
# print "begin loop", repr(i), repr(c), repr(state)
if state == 0:
if c == "'":
i += 1
output_query += c
state = 1
elif c == '"':
i += 1
output_query += c
state = 2
elif c == 'E':
# check for escaped single-quote string
i += 1
if i < len(query) and i > 1 and query[i] == "'":
i += 1
output_query += "E'"
state = 3
else:
output_query += c
elif src_style == "qmark" and c == "?":
i += 1
param_idx = len(output_args)
if param_idx == len(args):
raise QueryParameterIndexError("too many parameter fields, not enough parameters")
output_args.append(args[param_idx])
output_query += "$" + str(param_idx + 1)
elif src_style == "numeric" and c == ":":
i += 1
if i < len(query) and i > 1 and query[i].isdigit():
output_query += "$" + query[i]
i += 1
else:
raise QueryParameterParseError("numeric parameter : does not have numeric arg")
elif src_style == "named" and c == ":":
name = ""
while 1:
i += 1
if i == len(query):
break
c = query[i]
if c.isalnum() or c == '_':
name += c
else:
break
if name == "":
raise QueryParameterParseError("empty name of named parameter")
idx = mapping_to_idx.get(name)
if idx == None:
idx = len(output_args)
output_args.append(args[name])
idx += 1
mapping_to_idx[name] = idx
output_query += "$" + str(idx)
elif src_style == "format" and c == "%":
i += 1
if i < len(query) and i > 1:
if query[i] == "s":
param_idx = len(output_args)
if param_idx == len(args):
raise QueryParameterIndexError("too many parameter fields, not enough parameters")
output_args.append(args[param_idx])
output_query += "$" + str(param_idx + 1)
elif query[i] == "%":
output_query += "%"
else:
raise QueryParameterParseError("Only %s and %% are supported")
i += 1
else:
raise QueryParameterParseError("format parameter % does not have format code")
elif src_style == "pyformat" and c == "%":
i += 1
if i < len(query) and i > 1:
if query[i] == "(":
i += 1
# begin mapping name
end_idx = query.find(')', i)
if end_idx == -1:
raise QueryParameterParseError("began pyformat dict read, but couldn't find end of name")
else:
name = query[i:end_idx]
i = end_idx + 1
if i < len(query) and query[i] == "s":
i += 1
idx = mapping_to_idx.get(name)
if idx == None:
idx = len(output_args)
output_args.append(args[name])
idx += 1
mapping_to_idx[name] = idx
output_query += "$" + str(idx)
else:
raise QueryParameterParseError("format not specified or not supported (only %(...)s supported)")
elif query[i] == "%":
output_query += "%"
elif query[i] == "s":
# we have a %s in a pyformat query string. Assume
# support for format instead.
i -= 1
src_style = "format"
else:
raise QueryParameterParseError("Only %(name)s, %s and %% are supported")
else:
i += 1
output_query += c
elif state == 1:
output_query += c
i += 1
if c == "'":
# Could be a double ''
if i < len(query) and query[i] == "'":
# is a double quote.
output_query += query[i]
i += 1
else:
state = 0
elif src_style in ("pyformat","format") and c == "%":
# hm... we're only going to support an escaped percent sign
if i < len(query):
if query[i] == "%":
# good. We already output the first percent sign.
i += 1
else:
raise QueryParameterParseError("'%" + query[i] + "' not supported in quoted string")
elif state == 2:
output_query += c
i += 1
if c == '"':
state = 0
elif src_style in ("pyformat","format") and c == "%":
# hm... we're only going to support an escaped percent sign
if i < len(query):
if query[i] == "%":
# good. We already output the first percent sign.
i += 1
else:
raise QueryParameterParseError("'%" + query[i] + "' not supported in quoted string")
elif state == 3:
output_query += c
i += 1
if c == "\\":
# check for escaped single-quote
if i < len(query) and query[i] == "'":
output_query += "'"
i += 1
elif c == "'":
state = 0
elif src_style in ("pyformat","format") and c == "%":
# hm... we're only going to support an escaped percent sign
if i < len(query):
if query[i] == "%":
# good. We already output the first percent sign.
i += 1
else:
raise QueryParameterParseError("'%" + query[i] + "' not supported in quoted string")
return output_query, tuple(output_args)
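# Illustrative conversions (sketches; the queries and arguments are examples):
#
# convert_paramstyle("format", "SELECT * FROM t WHERE a = %s", ("x",))
#   -> ('SELECT * FROM t WHERE a = $1', ('x',))
# convert_paramstyle("named", "SELECT * FROM t WHERE a = :a", {"a": 1})
#   -> ('SELECT * FROM t WHERE a = $1', (1,))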
def require_open_cursor(fn):
def _fn(self, *args, **kwargs):
if self.cursor == None:
raise CursorClosedError()
return fn(self, *args, **kwargs)
return _fn
##
# The class of object returned by the {@link #ConnectionWrapper.cursor cursor method}.
class CursorWrapper(object):
def __init__(self, conn, connection):
self.cursor = interface.Cursor(conn)
self.arraysize = 1
self._connection = connection
self._override_rowcount = None
##
# This read-only attribute returns a reference to the connection object on
# which the cursor was created.
# <p>
# Stability: Part of a DBAPI 2.0 extension. A warning "DB-API extension
# cursor.connection used" will be fired.
connection = property(lambda self: self._getConnection())
def _getConnection(self):
warn("DB-API extension cursor.connection used", stacklevel=3)
return self._connection
##
# This read-only attribute specifies the number of rows that the last
# .execute*() produced (for DQL statements like 'select') or affected (for
# DML statements like 'update' or 'insert').
# <p>
# The attribute is -1 in case no .execute*() has been performed on the
# cursor or the rowcount of the last operation cannot be determined by
# the interface.
# <p>
# Stability: Part of the DBAPI 2.0 specification.
rowcount = property(lambda self: self._getRowCount())
@require_open_cursor
def _getRowCount(self):
if self._override_rowcount != None:
return self._override_rowcount
return self.cursor.row_count
##
# This read-only attribute is a sequence of 7-item sequences. Each value
# contains information describing one result column. The 7 items returned
# for each column are (name, type_code, display_size, internal_size,
# precision, scale, null_ok). Only the first two values are provided by
# this interface implementation.
# <p>
# Stability: Part of the DBAPI 2.0 specification.
description = property(lambda self: self._getDescription())
@require_open_cursor
def _getDescription(self):
if self.cursor.row_description == None:
return None
columns = []
for col in self.cursor.row_description:
columns.append((col["name"], col["type_oid"], None, None, None, None, None))
return columns
##
# Executes a database operation. Parameters may be provided as a sequence
# or mapping and will be bound to variables in the operation.
# <p>
# Stability: Part of the DBAPI 2.0 specification.
@require_open_cursor
def execute(self, operation, args=()):
if not self._connection.in_transaction:
self._connection.begin()
self._override_rowcount = None
self._execute(operation, args)
def _execute(self, operation, args=()):
new_query, new_args = convert_paramstyle(paramstyle, operation, args)
try:
self.cursor.execute(new_query, *new_args)
except ConnectionClosedError:
# can't rollback in this case
raise
except:
# any error will rollback the transaction to-date
self._connection.rollback()
raise
def copy_from(self, fileobj, table=None, sep='\t', null=None, query=None):
if query == None:
if table == None:
raise CopyQueryOrTableRequiredError()
query = "COPY %s FROM stdout DELIMITER '%s'" % (table, sep)
if null is not None:
query += " NULL '%s'" % (null,)
self.copy_execute(fileobj, query)
def copy_to(self, fileobj, table=None, sep='\t', null=None, query=None):
if query == None:
if table == None:
raise CopyQueryOrTableRequiredError()
query = "COPY %s TO stdout DELIMITER '%s'" % (table, sep)
if null is not None:
query += " NULL '%s'" % (null,)
self.copy_execute(fileobj, query)
@require_open_cursor
def copy_execute(self, fileobj, query):
try:
self.cursor.execute(query, stream=fileobj)
except ConnectionClosedError:
# can't rollback in this case
raise
except:
# any error will rollback the transaction to-date
import traceback; traceback.print_exc()
self._connection.rollback()
raise
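# Illustrative COPY round-trip (a sketch; the table name and data are examples):
#
# import StringIO
# buf = StringIO.StringIO("1\tfoo\n2\tbar\n")
# cur.copy_from(buf, table="mytable")   # issues: COPY mytable FROM stdout ...
# out = StringIO.StringIO()
# cur.copy_to(out, table="mytable")     # issues: COPY mytable TO stdout ...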
##
# Prepare a database operation and then execute it against all parameter
# sequences or mappings provided.
# <p>
# Stability: Part of the DBAPI 2.0 specification.
@require_open_cursor
def executemany(self, operation, parameter_sets):
if not self._connection.in_transaction:
self._connection.begin()
self._override_rowcount = 0
for parameters in parameter_sets:
self._execute(operation, parameters)
if self.cursor.row_count == -1 or self._override_rowcount == -1:
self._override_rowcount = -1
else:
self._override_rowcount += self.cursor.row_count
##
# Fetch the next row of a query result set, returning a single sequence, or
# None when no more data is available.
# <p>
# Stability: Part of the DBAPI 2.0 specification.
@require_open_cursor
def fetchone(self):
return self.cursor.read_tuple()
##
# Fetch the next set of rows of a query result, returning a sequence of
# sequences. An empty sequence is returned when no more rows are
# available.
# <p>
# Stability: Part of the DBAPI 2.0 specification.
# @param size The number of rows to fetch when called. If not provided,
# the arraysize property value is used instead.
def fetchmany(self, size=None):
if size == None:
size = self.arraysize
rows = []
for i in range(size):
value = self.fetchone()
if value == None:
break
rows.append(value)
return rows
##
# Fetch all remaining rows of a query result, returning them as a sequence
# of sequences.
# <p>
# Stability: Part of the DBAPI 2.0 specification.
@require_open_cursor
def fetchall(self):
return tuple(self.cursor.iterate_tuple())
##
# Close the cursor.
# <p>
# Stability: Part of the DBAPI 2.0 specification.
@require_open_cursor
def close(self):
self.cursor.close()
self.cursor = None
self._override_rowcount = None
def next(self):
warn("DB-API extension cursor.next() used", stacklevel=2)
retval = self.fetchone()
if retval == None:
raise StopIteration()
return retval
def __iter__(self):
warn("DB-API extension cursor.__iter__() used", stacklevel=2)
return self
def setinputsizes(self, sizes):
pass
def setoutputsize(self, size, column=None):
pass
@require_open_cursor
def fileno(self):
return self.cursor.fileno()
@require_open_cursor
def isready(self):
return self.cursor.isready()
def require_open_connection(fn):
def _fn(self, *args, **kwargs):
if self.conn == None:
raise ConnectionClosedError()
return fn(self, *args, **kwargs)
return _fn
##
# The class of object returned by the {@link #connect connect method}.
class ConnectionWrapper(object):
# DBAPI Extension: supply exceptions as attributes on the connection
Warning = property(lambda self: self._getError(Warning))
Error = property(lambda self: self._getError(Error))
InterfaceError = property(lambda self: self._getError(InterfaceError))
DatabaseError = property(lambda self: self._getError(DatabaseError))
OperationalError = property(lambda self: self._getError(OperationalError))
IntegrityError = property(lambda self: self._getError(IntegrityError))
InternalError = property(lambda self: self._getError(InternalError))
ProgrammingError = property(lambda self: self._getError(ProgrammingError))
NotSupportedError = property(lambda self: self._getError(NotSupportedError))
def _getError(self, error):
warn("DB-API extension connection.%s used" % error.__name__, stacklevel=3)
return error
@property
def in_transaction(self):
if self.conn:
return self.conn.in_transaction
return False
def __init__(self, **kwargs):
self.conn = interface.Connection(**kwargs)
self.notifies = []
self.notifies_lock = threading.Lock()
self.conn.NotificationReceived += self._notificationReceived
# Two Phase Commit internal attributes:
self.__tpc_xid = None
self.__tpc_prepared = None
def set_autocommit(self, state):
if self.conn.in_transaction and state and not self.conn.autocommit:
warn("enabling autocommit in an open transaction!")
self.conn.autocommit = state
def get_autocommit(self):
return self.conn.autocommit
autocommit = property(get_autocommit, set_autocommit)
@require_open_connection
def begin(self):
self.conn.begin()
def _notificationReceived(self, notice):
try:
# psycopg2 compatible notification interface
self.notifies_lock.acquire()
self.notifies.append((notice.backend_pid, notice.condition))
finally:
self.notifies_lock.release()
##
# Creates a {@link #CursorWrapper CursorWrapper} object bound to this
# connection.
# <p>
# Stability: Part of the DBAPI 2.0 specification.
@require_open_connection
def cursor(self):
return CursorWrapper(self.conn, self)
##
# Commits the current database transaction.
# <p>
# Stability: Part of the DBAPI 2.0 specification.
@require_open_connection
def commit(self):
# There's a threading bug here. If a query is sent after the
# commit, but before the begin, it will be executed immediately
# without a surrounding transaction. Like all threading bugs -- it
# sounds unlikely, until it happens every time in one
# application... however, to fix this, we need to lock the
# database connection entirely, so that no cursors can execute
# statements on other threads. Support for that type of lock will
# be done later.
if self.__tpc_xid:
raise ProgrammingError("Cannot do a normal commit() inside a "
"TPC transaction!")
self.conn.commit()
##
# Rolls back the current database transaction.
# <p>
# Stability: Part of the DBAPI 2.0 specification.
@require_open_connection
def rollback(self):
# see bug description in commit.
if self.__tpc_xid:
raise ProgrammingError("Cannot do a normal rollback() inside a "
"TPC transaction!")
self.conn.rollback()
##
# Closes the database connection.
# <p>
# Stability: Part of the DBAPI 2.0 specification.
@require_open_connection
def close(self):
self.conn.close()
self.conn = None
##
# Returns the "server_version" string provided by the connected server.
# <p>
# Stability: Extension of the DBAPI 2.0 specification.
@property
@require_open_connection
def server_version(self):
return self.conn.server_version()
# Stability: psycopg2 compatibility
@require_open_connection
def set_client_encoding(self, encoding=None):
"Set the client encoding for the current session"
if encoding:
self.conn.execute("SET client_encoding TO '%s';" % (encoding, ), simple_query=True)
return self.conn.encoding()
def xid(self,format_id, global_transaction_id, branch_qualifier):
"""Create a Transaction IDs (only global_transaction_id is used in pg)
format_id and branch_qualifier are not used in postgres
global_transaction_id may be any string identifier supported by postgres
returns a tuple (format_id, global_transaction_id, branch_qualifier)"""
return (format_id, global_transaction_id, branch_qualifier)
@require_open_connection
def tpc_begin(self,xid):
"Begin a two-phase transaction"
# disable auto-commit mode to begin a TPC transaction
self.autocommit = False
# (actually in postgres at this point it is a normal one)
if self.conn.in_transaction:
warn("tpc_begin() should be called outside a transaction block",
stacklevel=3)
self.conn.begin()
# store actual TPC transaction id
self.__tpc_xid = xid
self.__tpc_prepared = False
@require_open_connection
def tpc_prepare(self):
"Prepare a two-phase transaction"
if not self.__tpc_xid:
raise ProgrammingError("tpc_prepare() outside a TPC transaction "
"is not allowed!")
# Prepare the TPC
self.conn.execute("PREPARE TRANSACTION '%s';" % (self.__tpc_xid[1],),
simple_query=True)
self.conn.in_transaction = False
self.__tpc_prepared = True
@require_open_connection
def tpc_commit(self, xid=None):
"Commit a prepared two-phase transaction"
try:
# save current autocommit status (to be recovered later)
previous_autocommit_mode = self.autocommit
if not xid:
# use current tpc transaction
tpc_xid = self.__tpc_xid
else:
# use a recovered tpc transaction
tpc_xid = xid
if not xid in self.tpc_recover():
raise ProgrammingError("Requested TPC transaction is not "
"prepared!")
if not tpc_xid:
raise ProgrammingError("Cannot tpc_commit() without a TPC "
"transaction!")
if self.__tpc_prepared or (xid != self.__tpc_xid and xid):
# a two-phase commit:
# set the auto-commit mode for TPC commit
self.autocommit = True
try:
self.conn.execute("COMMIT PREPARED '%s';" % (tpc_xid[1], ),
simple_query=True)
finally:
# return to previous auto-commit mode
self.autocommit = previous_autocommit_mode
else:
try:
# a single-phase commit
self.conn.commit()
finally:
# return to previous auto-commit mode
self.autocommit = previous_autocommit_mode
finally:
# transaction is done, clear xid
self.__tpc_xid = None
@require_open_connection
def tpc_rollback(self, xid=None):
"Commit a prepared two-phase transaction"
try:
# save current autocommit status (to be recovered later)
previous_autocommit_mode = self.autocommit
if not xid:
# use current tpc transaction
tpc_xid = self.__tpc_xid
else:
# use a recovered tpc transaction
tpc_xid = xid
if not xid in self.tpc_recover():
raise ProgrammingError("Requested TPC transaction is not prepared!")
if not tpc_xid:
raise ProgrammingError("Cannot tpc_rollback() without a TPC prepared transaction!")
if self.__tpc_prepared or (xid != self.__tpc_xid and xid):
# a two-phase rollback
# set auto-commit for the TPC rollback
self.autocommit = True
try:
self.conn.execute("ROLLBACK PREPARED '%s';" % (tpc_xid[1],),
simple_query=True)
finally:
# return to previous auto-commit mode
self.autocommit = previous_autocommit_mode
else:
# a single-phase rollback
try:
self.conn.rollback()
finally:
# return to previous auto-commit mode
self.autocommit = previous_autocommit_mode
finally:
# transaction is done, clear xid
self.__tpc_xid = None
@require_open_connection
def tpc_recover(self):
"Returns a list of pending transaction IDs"
previous_autocommit_mode = self.autocommit
if not self.conn.in_transaction and not self.autocommit:
self.autocommit = True
elif not self.autocommit:
warn("tpc_recover() will open a transaction block", stacklevel=3)
curs = self.cursor()
xids = []
try:
# query system view that stores open (prepared) TPC transactions
curs.execute("SELECT gid FROM pg_prepared_xacts;");
xids.extend([self.xid(0,row[0],'') for row in curs])
finally:
curs.close()
# return to previous auto-commit mode
self.autocommit = previous_autocommit_mode
# return a list of TPC transaction ids (xid)
return xids
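# Illustrative two-phase-commit flow (a sketch; the transaction id below is
# an example):
#
# xid = conn.xid(0, 'my-global-tx', '')
# conn.tpc_begin(xid)
# # ... execute statements through cursors ...
# conn.tpc_prepare()
# conn.tpc_commit()   # or conn.tpc_rollback()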
##
# Creates a DBAPI 2.0 compatible interface to a PostgreSQL database.
# <p>
# Stability: Part of the DBAPI 2.0 specification.
#
# @param user The username to connect to the PostgreSQL server with. This
# parameter is required.
#
# @keyparam host The hostname of the PostgreSQL server to connect with.
# Providing this parameter is necessary for TCP/IP connections. One of either
# host, or unix_sock, must be provided.
#
# @keyparam unix_sock The path to the UNIX socket to access the database
# through, for example, '/tmp/.s.PGSQL.5432'. One of either unix_sock or host
# must be provided. The port parameter will have no affect if unix_sock is
# provided.
#
# @keyparam port The TCP/IP port of the PostgreSQL server instance. This
# parameter defaults to 5432, the registered and common port of PostgreSQL
# TCP/IP servers.
#
# @keyparam database The name of the database instance to connect with. This
# parameter is optional, if omitted the PostgreSQL server will assume the
# database name is the same as the username.
#
# @keyparam password The user password to connect to the server with. This
# parameter is optional. If omitted, and the database server requests password
# based authentication, the connection will fail. On the other hand, if this
# parameter is provided and the database does not request password
# authentication, then the password will not be used.
#
# @keyparam socket_timeout Socket connect timeout measured in seconds.
# Defaults to 60 seconds.
#
# @keyparam ssl Use SSL encryption for TCP/IP socket. Defaults to False.
#
# @return An instance of {@link #ConnectionWrapper ConnectionWrapper}.
def connect(dsn="", user=None, host=None, unix_sock=None, port=5432, database=None, password=None, socket_timeout=60, ssl=False):
return ConnectionWrapper(dsn=dsn, user=user, host=host,
unix_sock=unix_sock, port=port, database=database,
password=password, socket_timeout=socket_timeout, ssl=ssl)
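# Illustrative DB-API usage (a sketch; connection parameters are examples):
#
# conn = connect(user='postgres', host='localhost', database='mydb',
#                password='secret')
# cur = conn.cursor()
# cur.execute("SELECT %s AS greeting", ("hello",))
# print cur.fetchone()    # -> ('hello',)
# conn.commit()
# cur.close(); conn.close()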
def Date(year, month, day):
return datetime.date(year, month, day)
def Time(hour, minute, second):
return datetime.time(hour, minute, second)
def Timestamp(year, month, day, hour, minute, second):
return datetime.datetime(year, month, day, hour, minute, second)
def DateFromTicks(ticks):
return Date(*time.localtime(ticks)[:3])
def TimeFromTicks(ticks):
return Time(*time.localtime(ticks)[3:6])
def TimestampFromTicks(ticks):
return Timestamp(*time.localtime(ticks)[:6])
##
# Construct an object holding binary data.
def Binary(value):
return types.Bytea(value)
# I have no idea what this would be used for by a client app. Should it be
# TEXT, VARCHAR, CHAR? It will only compare against row_description's
# type_code if it is this one type. It is the varchar type oid for now, this
# appears to match expectations in the DB API 2.0 compliance test suite.
STRING = 1043
# bytea type_oid
BINARY = 17
# numeric type_oid
NUMBER = 1700
# timestamp type_oid
DATETIME = 1114
# oid type_oid
ROWID = 26
| Python |
# vim: sw=4:expandtab:foldmethod=marker
#
# Copyright (c) 2007-2009, Mathieu Fenniak
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
__author__ = "Mathieu Fenniak"
import socket
try:
import ssl as sslmodule
except ImportError:
sslmodule = None
import select
import threading
import struct
import hashlib
from cStringIO import StringIO
from errors import *
from util import MulticastDelegate
import types
##
# An SSLRequest message. To initiate an SSL-encrypted connection, an
# SSLRequest message is used rather than a {@link StartupMessage
# StartupMessage}. A StartupMessage is still sent, but only after SSL
# negotiation (if accepted).
# <p>
# Stability: This is an internal class. No stability guarantee is made.
class SSLRequest(object):
def __init__(self):
pass
# Int32(8) - Message length, including self.<br>
# Int32(80877103) - The SSL request code.<br>
def serialize(self):
return struct.pack("!ii", 8, 80877103)
##
# A StartupMessage message. Begins a DB session, identifying the user to be
# authenticated as and the database to connect to.
# <p>
# Stability: This is an internal class. No stability guarantee is made.
class StartupMessage(object):
def __init__(self, user, database=None):
self.user = user
self.database = database
# Int32 - Message length, including self.
# Int32(196608) - Protocol version number. Version 3.0.
# Any number of key/value pairs, terminated by a zero byte:
# String - A parameter name (user, database, or options)
# String - Parameter value
def serialize(self):
protocol = 196608
val = struct.pack("!i", protocol)
val += "user\x00" + self.user + "\x00"
if self.database:
val += "database\x00" + self.database + "\x00"
val += "\x00"
val = struct.pack("!i", len(val) + 4) + val
return val
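# For example (illustrative values): StartupMessage("bob", "mydb").serialize()
# yields, in order: Int32 length (32), Int32 protocol (196608), then the
# bytes "user\x00bob\x00database\x00mydb\x00\x00".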
##
# Parse message. Creates a prepared statement in the DB session.
# <p>
# Stability: This is an internal class. No stability guarantee is made.
#
# @param ps Name of the prepared statement to create.
# @param qs Query string.
# @param type_oids An iterable that contains the PostgreSQL type OIDs for
# parameters in the query string.
class Parse(object):
def __init__(self, ps, qs, type_oids):
if isinstance(qs, unicode):
raise TypeError("qs must be encoded byte data")
self.ps = ps
self.qs = qs
self.type_oids = type_oids
def __repr__(self):
return "<Parse ps=%r qs=%r>" % (self.ps, self.qs)
# Byte1('P') - Identifies the message as a Parse command.
# Int32 - Message length, including self.
# String - Prepared statement name. An empty string selects the unnamed
# prepared statement.
# String - The query string.
# Int16 - Number of parameter data types specified (can be zero).
# For each parameter:
# Int32 - The OID of the parameter data type.
def serialize(self):
val = self.ps + "\x00" + self.qs + "\x00"
val = val + struct.pack("!h", len(self.type_oids))
for oid in self.type_oids:
# Parse message doesn't seem to handle the -1 type_oid for NULL
# values that other messages handle. So we'll provide type_oid 705,
# the PG "unknown" type.
if oid == -1: oid = 705
val = val + struct.pack("!i", oid)
val = struct.pack("!i", len(val) + 4) + val
val = "P" + val
return val
##
# Bind message. Readies a prepared statement for execution.
# <p>
# Stability: This is an internal class. No stability guarantee is made.
#
# @param portal Name of the destination portal.
# @param ps Name of the source prepared statement.
# @param in_fc An iterable containing the format codes for input
# parameters. 0 = Text, 1 = Binary.
# @param params The parameters.
# @param out_fc An iterable containing the format codes for output
# parameters. 0 = Text, 1 = Binary.
# @param kwargs Additional arguments to pass to the type conversion
# methods.
class Bind(object):
def __init__(self, portal, ps, in_fc, params, out_fc, **kwargs):
self.portal = portal
self.ps = ps
self.in_fc = in_fc
self.params = []
for i in range(len(params)):
if len(self.in_fc) == 0:
fc = 0
elif len(self.in_fc) == 1:
fc = self.in_fc[0]
else:
fc = self.in_fc[i]
self.params.append(types.pg_value(params[i], fc, **kwargs))
self.out_fc = out_fc
def __repr__(self):
return "<Bind p=%r s=%r>" % (self.portal, self.ps)
# Byte1('B') - Identifies the Bind command.
# Int32 - Message length, including self.
# String - Name of the destination portal.
# String - Name of the source prepared statement.
# Int16 - Number of parameter format codes.
# For each parameter format code:
# Int16 - The parameter format code.
# Int16 - Number of parameter values.
# For each parameter value:
# Int32 - The length of the parameter value, in bytes, not including
#         this length. -1 indicates a NULL parameter value, in which case
#         no value bytes follow.
# Byte[n] - Value of the parameter.
# Int16 - The number of result-column format codes.
# For each result-column format code:
# Int16 - The format code.
def serialize(self):
retval = StringIO()
retval.write(self.portal + "\x00")
retval.write(self.ps + "\x00")
retval.write(struct.pack("!h", len(self.in_fc)))
for fc in self.in_fc:
retval.write(struct.pack("!h", fc))
retval.write(struct.pack("!h", len(self.params)))
for param in self.params:
if param == None:
# special case, NULL value
retval.write(struct.pack("!i", -1))
else:
retval.write(struct.pack("!i", len(param)))
retval.write(param)
retval.write(struct.pack("!h", len(self.out_fc)))
for fc in self.out_fc:
retval.write(struct.pack("!h", fc))
val = retval.getvalue()
val = struct.pack("!i", len(val) + 4) + val
val = "B" + val
return val
##
# A Close message, used for closing prepared statements and portals.
# <p>
# Stability: This is an internal class. No stability guarantee is made.
#
# @param typ 'S' for prepared statement, 'P' for portal.
# @param name The name of the item to close.
class Close(object):
def __init__(self, typ, name):
if len(typ) != 1:
raise InternalError("Close typ must be 1 char")
self.typ = typ
self.name = name
# Byte1('C') - Identifies the message as a close command.
# Int32 - Message length, including self.
# Byte1 - 'S' for prepared statement, 'P' for portal.
# String - The name of the item to close.
def serialize(self):
val = self.typ + self.name + "\x00"
val = struct.pack("!i", len(val) + 4) + val
val = "C" + val
return val
##
# A specialized Close message for a portal.
# <p>
# Stability: This is an internal class. No stability guarantee is made.
class ClosePortal(Close):
def __init__(self, name):
Close.__init__(self, "P", name)
##
# A specialized Close message for a prepared statement.
# <p>
# Stability: This is an internal class. No stability guarantee is made.
class ClosePreparedStatement(Close):
def __init__(self, name):
Close.__init__(self, "S", name)
##
# A Describe message, used for obtaining information on prepared statements
# and portals.
# <p>
# Stability: This is an internal class. No stability guarantee is made.
#
# @param typ 'S' for prepared statement, 'P' for portal.
# @param name The name of the item to describe.
class Describe(object):
def __init__(self, typ, name):
if len(typ) != 1:
raise InternalError("Describe typ must be 1 char")
self.typ = typ
self.name = name
# Byte1('D') - Identifies the message as a describe command.
# Int32 - Message length, including self.
# Byte1 - 'S' for prepared statement, 'P' for portal.
# String - The name of the item to describe.
def serialize(self):
val = self.typ + self.name + "\x00"
val = struct.pack("!i", len(val) + 4) + val
val = "D" + val
return val
##
# A specialized Describe message for a portal.
# <p>
# Stability: This is an internal class. No stability guarantee is made.
class DescribePortal(Describe):
def __init__(self, name):
Describe.__init__(self, "P", name)
def __repr__(self):
return "<DescribePortal %r>" % (self.name)
##
# A specialized Describe message for a prepared statement.
# <p>
# Stability: This is an internal class. No stability guarantee is made.
class DescribePreparedStatement(Describe):
def __init__(self, name):
Describe.__init__(self, "S", name)
def __repr__(self):
return "<DescribePreparedStatement %r>" % (self.name)
##
# A Flush message forces the backend to deliver any data pending in its
# output buffers.
# <p>
# Stability: This is an internal class. No stability guarantee is made.
class Flush(object):
# Byte1('H') - Identifies the message as a flush command.
# Int32(4) - Length of message, including self.
def serialize(self):
return 'H\x00\x00\x00\x04'
def __repr__(self):
return "<Flush>"
##
# Causes the backend to close the current transaction (if not in a BEGIN/COMMIT
# block), and issue ReadyForQuery.
# <p>
# Stability: This is an internal class. No stability guarantee is made.
class Sync(object):
# Byte1('S') - Identifies the message as a sync command.
# Int32(4) - Length of message, including self.
def serialize(self):
return 'S\x00\x00\x00\x04'
def __repr__(self):
return "<Sync>"
##
# Transmits a password.
# <p>
# Stability: This is an internal class. No stability guarantee is made.
class PasswordMessage(object):
def __init__(self, pwd):
self.pwd = pwd
# Byte1('p') - Identifies the message as a password message.
# Int32 - Message length including self.
# String - The password. Password may be encrypted.
def serialize(self):
val = self.pwd + "\x00"
val = struct.pack("!i", len(val) + 4) + val
val = "p" + val
return val
##
# Requests that the backend execute a portal and retrieve any number of rows.
# <p>
# Stability: This is an internal class. No stability guarantee is made.
# @param row_count The number of rows to return. Can be zero to indicate the
# backend should return all rows. If the portal represents a
# query that does not return rows, no rows will be returned
# no matter what the row_count.
class Execute(object):
def __init__(self, portal, row_count):
self.portal = portal
self.row_count = row_count
# Byte1('E') - Identifies the message as an execute message.
# Int32 - Message length, including self.
# String - The name of the portal to execute.
# Int32 - Maximum number of rows to return, if portal contains a query that
# returns rows. 0 = no limit.
def serialize(self):
val = self.portal + "\x00" + struct.pack("!i", self.row_count)
val = struct.pack("!i", len(val) + 4) + val
val = "E" + val
return val
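# Illustrative extended-query sequence built from these messages (a sketch;
# the statement name, SQL, and int4 OID 23 are examples):
#
# Parse("stmt", "SELECT $1", [23])    # prepare the statement
# Bind("", "stmt", [], (5,), [])      # bind parameters into a portal
# Execute("", 0)                      # run the portal, 0 = no row limit
# Sync()                              # request ReadyForQuery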
class SimpleQuery(object):
"Requests that the backend execute a Simple Query (SQL string)"
def __init__(self, query_string):
self.query_string = query_string
# Byte1('Q') - Identifies the message as a query message.
# Int32 - Message length, including self.
# String - The query string itself.
def serialize(self):
val = self.query_string + "\x00"
val = struct.pack("!i", len(val) + 4) + val
val = "Q" + val
return val
def __repr__(self):
return "<SimpleQuery qs=%r>" % (self.query_string)
##
# Informs the backend that the connection is being closed.
# <p>
# Stability: This is an internal class. No stability guarantee is made.
class Terminate(object):
def __init__(self):
pass
# Byte1('X') - Identifies the message as a terminate message.
# Int32(4) - Message length, including self.
def serialize(self):
return 'X\x00\x00\x00\x04'
##
# Base class of all Authentication[*] messages.
# <p>
# Stability: This is an internal class. No stability guarantee is made.
class AuthenticationRequest(object):
def __init__(self, data):
pass
# Byte1('R') - Identifies the message as an authentication request.
# Int32(8) - Message length, including self.
# Int32 - An authentication code that represents different
# authentication messages:
# 0 = AuthenticationOk
# 2 = Kerberos v5 (not supported by pg8000)
# 3 = Cleartext pwd (not supported by pg8000)
# 4 = crypt() pwd (not supported by pg8000)
# 5 = MD5 pwd
# 6 = SCM credential (not supported by pg8000)
# 7 = GSSAPI (not supported by pg8000)
# 8 = GSSAPI data (not supported by pg8000)
# 9 = SSPI (not supported by pg8000)
# Some authentication messages have additional data following the
# authentication code. That data is documented in the appropriate class.
def createFromData(data):
ident = struct.unpack("!i", data[:4])[0]
klass = authentication_codes.get(ident, None)
if klass != None:
return klass(data[4:])
else:
raise NotSupportedError("authentication method %r not supported" % (ident,))
createFromData = staticmethod(createFromData)
def ok(self, conn, user, **kwargs):
raise InternalError("ok method should be overridden on AuthenticationRequest instance")
##
# A message representing that the backend accepted the provided username
# without any challenge.
# <p>
# Stability: This is an internal class. No stability guarantee is made.
class AuthenticationOk(AuthenticationRequest):
def ok(self, conn, user, **kwargs):
return True
##
# A message representing the backend requesting an MD5 hashed password
# response. The response will be sent as md5(md5(pwd + login) + salt).
# <p>
# Stability: This is an internal class. No stability guarantee is made.
class AuthenticationMD5Password(AuthenticationRequest):
# Additional message data:
# Byte4 - Hash salt.
def __init__(self, data):
self.salt = "".join(struct.unpack("4c", data))
def ok(self, conn, user, password=None, **kwargs):
if password == None:
raise InterfaceError("server requesting MD5 password authentication, but no password was provided")
pwd = "md5" + hashlib.md5(hashlib.md5(password + user).hexdigest() + self.salt).hexdigest()
conn._send(PasswordMessage(pwd))
conn._flush()
reader = MessageReader(conn)
reader.add_message(AuthenticationRequest, lambda msg, reader: reader.return_value(msg.ok(conn, user)), reader)
reader.add_message(ErrorResponse, self._ok_error)
return reader.handle_messages()
def _ok_error(self, msg):
if msg.code == "28000":
raise InterfaceError("md5 password authentication failed")
else:
raise msg.createException()
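##
# A minimal, self-contained sketch of the MD5 challenge-response computed
# by the ok() method above: the frontend answers with
# "md5" + md5(md5(password + user) + salt). The user, password, and salt
# values below are illustrative, not part of the module.
def _md5_response_sketch():
    import hashlib
    user, password, salt = "alice", "secret", "\x01\x02\x03\x04"
    inner = hashlib.md5(password + user).hexdigest()
    return "md5" + hashlib.md5(inner + salt).hexdigest()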
authentication_codes = {
0: AuthenticationOk,
5: AuthenticationMD5Password,
}
##
# ParameterStatus message sent from backend, used to inform the frontend of
# runtime configuration parameter changes.
# <p>
# Stability: This is an internal class. No stability guarantee is made.
class ParameterStatus(object):
def __init__(self, key, value):
self.key = key
self.value = value
# Byte1('S') - Identifies ParameterStatus
# Int32 - Message length, including self.
# String - Runtime parameter name.
# String - Runtime parameter value.
def createFromData(data):
key = data[:data.find("\x00")]
value = data[data.find("\x00")+1:-1]
return ParameterStatus(key, value)
createFromData = staticmethod(createFromData)
##
# BackendKeyData message sent from backend. Contains a connection's process
# ID and a secret key. Can be used to terminate the connection's current
# actions, such as a long running query. Not supported by pg8000 yet.
# <p>
# Stability: This is an internal class. No stability guarantee is made.
class BackendKeyData(object):
def __init__(self, process_id, secret_key):
self.process_id = process_id
self.secret_key = secret_key
# Byte1('K') - Identifier.
# Int32(12) - Message length, including self.
# Int32 - Process ID.
# Int32 - Secret key.
def createFromData(data):
process_id, secret_key = struct.unpack("!2i", data)
return BackendKeyData(process_id, secret_key)
createFromData = staticmethod(createFromData)
##
# Message representing a query with no data.
# <p>
# Stability: This is an internal class. No stability guarantee is made.
class NoData(object):
# Byte1('n') - Identifier.
# Int32(4) - Message length, including self.
def createFromData(data):
return NoData()
createFromData = staticmethod(createFromData)
##
# Message representing a successful Parse.
# <p>
# Stability: This is an internal class. No stability guarantee is made.
class ParseComplete(object):
# Byte1('1') - Identifier.
# Int32(4) - Message length, including self.
def createFromData(data):
return ParseComplete()
createFromData = staticmethod(createFromData)
##
# Message representing a successful Bind.
# <p>
# Stability: This is an internal class. No stability guarantee is made.
class BindComplete(object):
# Byte1('2') - Identifier.
# Int32(4) - Message length, including self.
def createFromData(data):
return BindComplete()
createFromData = staticmethod(createFromData)
##
# Message representing a successful Close.
# <p>
# Stability: This is an internal class. No stability guarantee is made.
class CloseComplete(object):
# Byte1('3') - Identifier.
# Int32(4) - Message length, including self.
def createFromData(data):
return CloseComplete()
createFromData = staticmethod(createFromData)
##
# Message representing data from an Execute has been received, but more data
# exists in the portal.
# <p>
# Stability: This is an internal class. No stability guarantee is made.
class PortalSuspended(object):
# Byte1('s') - Identifier.
# Int32(4) - Message length, including self.
def createFromData(data):
return PortalSuspended()
createFromData = staticmethod(createFromData)
##
# Message representing the backend is ready to process a new query.
# <p>
# Stability: This is an internal class. No stability guarantee is made.
class ReadyForQuery(object):
def __init__(self, status):
self._status = status
##
# I = Idle, T = Idle in Transaction, E = idle in failed transaction.
status = property(lambda self: self._status)
def __repr__(self):
return "<ReadyForQuery %s>" % \
{"I": "Idle", "T": "Idle in Transaction", "E": "Idle in Failed Transaction"}[self.status]
# Byte1('Z') - Identifier.
# Int32(5) - Message length, including self.
# Byte1 - Status indicator.
def createFromData(data):
return ReadyForQuery(data)
createFromData = staticmethod(createFromData)
##
# Represents a notice sent from the server. This is not the same as a
# notification. A notice is just additional information about a query, such
# as a notice that a primary key has automatically been created for a table.
# <p>
# A NoticeResponse instance will have properties containing the data sent
# from the server:
# <ul>
# <li>severity -- "ERROR", "FATAL', "PANIC", "WARNING", "NOTICE", "DEBUG",
# "INFO", or "LOG". Always present.</li>
# <li>code -- the SQLSTATE code for the error. See Appendix A of the
# PostgreSQL documentation for specific error codes. Always present.</li>
# <li>msg -- human-readable error message. Always present.</li>
# <li>detail -- Optional additional information.</li>
# <li>hint -- Optional suggestion about what to do about the issue.</li>
# <li>position -- Optional index into the query string.</li>
# <li>where -- Optional context.</li>
# <li>file -- Source-code file.</li>
# <li>line -- Source-code line.</li>
# <li>routine -- Source-code routine.</li>
# </ul>
# <p>
# Stability: Added in pg8000 v1.03. Required properties severity, code, and
# msg are guaranteed for v1.xx. Other properties should be checked with
# hasattr before accessing.
class NoticeResponse(object):
responseKeys = {
"S": "severity", # always present
"C": "code", # always present
"M": "msg", # always present
"D": "detail",
"H": "hint",
"P": "position",
"p": "_position",
"q": "_query",
"W": "where",
"F": "file",
"L": "line",
"R": "routine",
}
def __init__(self, **kwargs):
for arg, value in kwargs.items():
setattr(self, arg, value)
def __repr__(self):
return "<NoticeResponse %s %s %r>" % (self.severity, self.code, self.msg)
def dataIntoDict(data):
retval = {}
for s in data.split("\x00"):
if not s: continue
key, value = s[0], s[1:]
key = NoticeResponse.responseKeys.get(key, key)
retval[key] = value
return retval
dataIntoDict = staticmethod(dataIntoDict)
# Byte1('N') - Identifier
# Int32 - Message length
# Any number of these, followed by a zero byte:
# Byte1 - code identifying the field type (see responseKeys)
# String - field value
def createFromData(data):
return NoticeResponse(**NoticeResponse.dataIntoDict(data))
createFromData = staticmethod(createFromData)
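##
# Sketch of the field layout parsed by dataIntoDict above: a run of
# one-byte field codes, each followed by a NUL-terminated value, ending
# with an empty field. The payload below is hand-built for illustration,
# not captured server output.
def _notice_payload_sketch():
    data = "SNOTICE\x00C00000\x00Mtable created\x00\x00"
    fields = NoticeResponse.dataIntoDict(data)
    assert fields["severity"] == "NOTICE"
    assert fields["code"] == "00000"
    assert fields["msg"] == "table created"
    return fields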
##
# A message sent in case of a server-side error. Contains the same properties
# that {@link NoticeResponse NoticeResponse} contains.
# <p>
# Stability: Added in pg8000 v1.03. Required properties severity, code, and
# msg are guaranteed for v1.xx. Other properties should be checked with
# hasattr before accessing.
class ErrorResponse(object):
def __init__(self, **kwargs):
for arg, value in kwargs.items():
setattr(self, arg, value)
def __repr__(self):
return "<ErrorResponse %s %s %r>" % (self.severity, self.code, self.msg)
def createException(self):
return ProgrammingError(self.severity, self.code, self.msg)
def createFromData(data):
return ErrorResponse(**NoticeResponse.dataIntoDict(data))
createFromData = staticmethod(createFromData)
##
# A message sent if this connection receives a NOTIFY that it was LISTENing for.
# <p>
# Stability: Added in pg8000 v1.03. When limited to accessing properties from
# a notification event dispatch, stability is guaranteed for v1.xx.
class NotificationResponse(object):
def __init__(self, backend_pid, condition, additional_info):
self._backend_pid = backend_pid
self._condition = condition
self._additional_info = additional_info
##
# An integer representing the process ID of the backend that triggered
# the NOTIFY.
# <p>
# Stability: Added in pg8000 v1.03, stability guaranteed for v1.xx.
backend_pid = property(lambda self: self._backend_pid)
##
# The name of the notification fired.
# <p>
# Stability: Added in pg8000 v1.03, stability guaranteed for v1.xx.
condition = property(lambda self: self._condition)
##
# Currently unspecified by the PostgreSQL documentation as of v8.3.1.
# <p>
# Stability: Added in pg8000 v1.03, stability guaranteed for v1.xx.
additional_info = property(lambda self: self._additional_info)
def __repr__(self):
return "<NotificationResponse %s %s %r>" % (self.backend_pid, self.condition, self.additional_info)
def createFromData(data):
backend_pid = struct.unpack("!i", data[:4])[0]
data = data[4:]
null = data.find("\x00")
condition = data[:null]
data = data[null+1:]
null = data.find("\x00")
additional_info = data[:null]
return NotificationResponse(backend_pid, condition, additional_info)
createFromData = staticmethod(createFromData)
class ParameterDescription(object):
def __init__(self, type_oids):
self.type_oids = type_oids
def createFromData(data):
count = struct.unpack("!h", data[:2])[0]
type_oids = struct.unpack("!" + "i"*count, data[2:])
return ParameterDescription(type_oids)
createFromData = staticmethod(createFromData)
class RowDescription(object):
def __init__(self, fields):
self.fields = fields
def createFromData(data):
count = struct.unpack("!h", data[:2])[0]
data = data[2:]
fields = []
for i in range(count):
null = data.find("\x00")
field = {"name": data[:null]}
data = data[null+1:]
field["table_oid"], field["column_attrnum"], field["type_oid"], field["type_size"], field["type_modifier"], field["format"] = struct.unpack("!ihihih", data[:18])
data = data[18:]
fields.append(field)
return RowDescription(fields)
createFromData = staticmethod(createFromData)
class CommandComplete(object):
def __init__(self, command, rows=None, oid=None):
self.command = command
self.rows = rows
self.oid = oid
def createFromData(data):
values = data[:-1].split(" ")
args = {}
args['command'] = values[0]
if args['command'] in ("INSERT", "DELETE", "UPDATE", "MOVE", "FETCH", "COPY", "SELECT"):
args['rows'] = int(values[-1])
if args['command'] == "INSERT":
args['oid'] = int(values[1])
else:
args['command'] = data[:-1]
return CommandComplete(**args)
createFromData = staticmethod(createFromData)
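##
# Sketch of the command tags handled by createFromData above; the
# NUL-terminated strings are hand-built examples of backend output.
def _command_complete_sketch():
    ins = CommandComplete.createFromData("INSERT 0 5\x00")
    sel = CommandComplete.createFromData("SELECT 42\x00")
    assert (ins.command, ins.oid, ins.rows) == ("INSERT", 0, 5)
    assert (sel.command, sel.rows) == ("SELECT", 42)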
class DataRow(object):
def __init__(self, fields):
self.fields = fields
def createFromData(data):
count = struct.unpack("!h", data[:2])[0]
data = data[2:]
fields = []
for i in range(count):
val_len = struct.unpack("!i", data[:4])[0]
data = data[4:]
if val_len == -1:
fields.append(None)
else:
fields.append(data[:val_len])
data = data[val_len:]
return DataRow(fields)
createFromData = staticmethod(createFromData)
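##
# Sketch of the DataRow wire layout decoded above: a 16-bit column count,
# then one 32-bit length per column (-1 flags NULL) followed by that many
# raw bytes. The buffer is hand-packed for illustration.
def _data_row_sketch():
    import struct
    data = struct.pack("!h", 2) + struct.pack("!i", 3) + "abc" + struct.pack("!i", -1)
    row = DataRow.createFromData(data)
    assert row.fields == ["abc", None]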
class CopyData(object):
# "d": CopyData,
def __init__(self, data):
self.data = data
def createFromData(data):
return CopyData(data)
createFromData = staticmethod(createFromData)
def serialize(self):
return 'd' + struct.pack('!i', len(self.data) + 4) + self.data
class CopyDone(object):
# Byte1('c') - Identifier.
# Int32(4) - Message length, including self.
def createFromData(data):
return CopyDone()
createFromData = staticmethod(createFromData)
def serialize(self):
return 'c\x00\x00\x00\x04'
class CopyOutResponse(object):
# Byte1('H')
# Int32(4) - Length of message contents in bytes, including self.
# Int8(1) - 0 textual, 1 binary
# Int16(2) - Number of columns
# Int16(N) - Format codes for each column (0 text, 1 binary)
def __init__(self, is_binary, column_formats):
self.is_binary = is_binary
self.column_formats = column_formats
def createFromData(data):
is_binary, num_cols = struct.unpack('!bh', data[:3])
column_formats = struct.unpack('!' + ('h' * num_cols), data[3:])
return CopyOutResponse(is_binary, column_formats)
createFromData = staticmethod(createFromData)
class CopyInResponse(object):
# Byte1('G')
# Otherwise the same as CopyOutResponse
def __init__(self, is_binary, column_formats):
self.is_binary = is_binary
self.column_formats = column_formats
def createFromData(data):
is_binary, num_cols = struct.unpack('!bh', data[:3])
column_formats = struct.unpack('!' + ('h' * num_cols), data[3:])
return CopyInResponse(is_binary, column_formats)
createFromData = staticmethod(createFromData)
class EmptyQueryResponse(object):
# Byte1('I')
# Response to an empty query string. (This substitutes for CommandComplete.)
def createFromData(data):
return EmptyQueryResponse()
createFromData = staticmethod(createFromData)
class MessageReader(object):
def __init__(self, connection):
self._conn = connection
self._msgs = []
# If true, raise exception from an ErrorResponse after messages are
# processed. This can be used to leave the connection in a usable
# state after an error response, rather than having unconsumed
# messages that won't be understood in another context.
self.delay_raising_exception = False
self.ignore_unhandled_messages = False
def add_message(self, msg_class, handler, *args, **kwargs):
self._msgs.append((msg_class, handler, args, kwargs))
def clear_messages(self):
self._msgs = []
def return_value(self, value):
self._retval = value
def handle_messages(self):
exc = None
while 1:
msg = self._conn._read_message()
msg_handled = False
for (msg_class, handler, args, kwargs) in self._msgs:
if isinstance(msg, msg_class):
msg_handled = True
retval = handler(msg, *args, **kwargs)
if retval:
# The handler returned a true value, meaning that the
# message loop should be aborted.
if exc != None:
raise exc
return retval
elif hasattr(self, "_retval"):
# The handler told us to return -- used for non-true
# return values
if exc != None:
raise exc
return self._retval
if msg_handled:
continue
elif isinstance(msg, ErrorResponse):
exc = msg.createException()
if not self.delay_raising_exception:
raise exc
elif isinstance(msg, NoticeResponse):
self._conn.handleNoticeResponse(msg)
elif isinstance(msg, ParameterStatus):
self._conn.handleParameterStatus(msg)
elif isinstance(msg, NotificationResponse):
self._conn.handleNotificationResponse(msg)
elif not self.ignore_unhandled_messages:
raise InternalError("Unexpected response msg %r" % (msg))
def sync_on_error(fn):
def _fn(self, *args, **kwargs):
try:
self._sock_lock.acquire()
return fn(self, *args, **kwargs)
except:
self._sync()
raise
finally:
self._sock_lock.release()
return _fn
class Connection(object):
def __init__(self, unix_sock=None, host=None, port=5432, socket_timeout=60, ssl=False):
self._client_encoding = "ascii"
self._integer_datetimes = False
self._server_version = None
self._sock_buf = ""
self._sock_buf_pos = 0
self._send_sock_buf = []
self._block_size = 8192
self._sock_lock = threading.Lock()
if unix_sock == None and host != None:
self._sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
elif unix_sock != None:
if not hasattr(socket, "AF_UNIX"):
raise InterfaceError("attempt to connect to unix socket on unsupported platform")
self._sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
else:
raise ProgrammingError("one of host or unix_sock must be provided")
if unix_sock == None and host != None:
self._sock.connect((host, port))
elif unix_sock != None:
self._sock.connect(unix_sock)
if ssl:
self._sock_lock.acquire()
try:
self._send(SSLRequest())
self._flush()
resp = self._sock.recv(1)
if resp == 'S' and sslmodule is not None:
self._sock = sslmodule.wrap_socket(self._sock)
elif sslmodule is None:
raise InterfaceError("SSL required but ssl module not available in this python installation")
else:
raise InterfaceError("server refuses SSL")
finally:
self._sock_lock.release()
else:
# settimeout causes SSL failure on Windows. Python bug 1462352.
self._sock.settimeout(socket_timeout)
self._state = "noauth"
self._backend_key_data = None
self.NoticeReceived = MulticastDelegate()
self.ParameterStatusReceived = MulticastDelegate()
self.NotificationReceived = MulticastDelegate()
self.ParameterStatusReceived += self._onParameterStatusReceived
def verifyState(self, state):
if self._state != state:
raise InternalError("connection state must be %s, is %s" % (state, self._state))
def _send(self, msg):
assert self._sock_lock.locked()
##print "_send(%r)" % msg
data = msg.serialize()
if not isinstance(data, str):
raise TypeError("bytes data expected")
self._send_sock_buf.append(data)
def _flush(self):
assert self._sock_lock.locked()
self._sock.sendall("".join(self._send_sock_buf))
del self._send_sock_buf[:]
def _read_bytes(self, byte_count):
retval = []
bytes_read = 0
while bytes_read < byte_count:
if self._sock_buf_pos == len(self._sock_buf):
self._sock_buf = self._sock.recv(1024)
self._sock_buf_pos = 0
rpos = min(len(self._sock_buf), self._sock_buf_pos + (byte_count - bytes_read))
addt_data = self._sock_buf[self._sock_buf_pos:rpos]
bytes_read += (rpos - self._sock_buf_pos)
assert bytes_read <= byte_count
self._sock_buf_pos = rpos
retval.append(addt_data)
return "".join(retval)
def _read_message(self):
assert self._sock_lock.locked()
bytes = self._read_bytes(5)
message_code = bytes[0]
data_len = struct.unpack("!i", bytes[1:])[0] - 4
bytes = self._read_bytes(data_len)
assert len(bytes) == data_len
msg = message_types[message_code].createFromData(bytes)
##print "_read_message() -> %r" % msg
return msg
def authenticate(self, user, **kwargs):
self.verifyState("noauth")
self._sock_lock.acquire()
try:
self._send(StartupMessage(user, database=kwargs.get("database",None)))
self._flush()
reader = MessageReader(self)
reader.add_message(AuthenticationRequest, self._authentication_request(user, **kwargs))
reader.handle_messages()
finally:
self._sock_lock.release()
def _authentication_request(self, user, **kwargs):
def _func(msg):
assert self._sock_lock.locked()
if not msg.ok(self, user, **kwargs):
raise InterfaceError("authentication method %s failed" % msg.__class__.__name__)
self._state = "auth"
reader = MessageReader(self)
reader.add_message(ReadyForQuery, self._ready_for_query)
reader.add_message(BackendKeyData, self._receive_backend_key_data)
reader.handle_messages()
return 1
return _func
def _ready_for_query(self, msg):
self._state = "ready"
return True
def _receive_backend_key_data(self, msg):
self._backend_key_data = msg
@sync_on_error
def parse(self, statement, qs, param_types):
self.verifyState("ready")
type_info = [types.pg_type_info(x) for x in param_types]
param_types, param_fc = [x[0] for x in type_info], [x[1] for x in type_info] # zip(*type_info) -- fails on empty arr
if isinstance(qs, unicode):
qs = qs.encode(self._client_encoding)
self._send(Parse(statement, qs, param_types))
self._send(DescribePreparedStatement(statement))
self._send(Flush())
self._flush()
reader = MessageReader(self)
# ParseComplete is good.
reader.add_message(ParseComplete, lambda msg: 0)
# Well, we don't really care -- we're going to send whatever we
# want and let the database deal with it. But thanks anyways!
reader.add_message(ParameterDescription, lambda msg: 0)
# We're not waiting for a row description. Return something
# distinctive to let bind know that there is no output.
reader.add_message(NoData, lambda msg: (None, param_fc))
# Common row description response
reader.add_message(RowDescription, lambda msg: (msg, param_fc))
return reader.handle_messages()
@sync_on_error
def bind(self, portal, statement, params, parse_data, copy_stream):
self.verifyState("ready")
row_desc, param_fc = parse_data
if row_desc == None:
# no data coming out
output_fc = ()
else:
# We've got row_desc that allows us to identify what we're going to
# get back from this statement.
output_fc = [types.py_type_info(f) for f in row_desc.fields]
self._send(Bind(portal, statement, param_fc, params, output_fc, client_encoding = self._client_encoding, integer_datetimes = self._integer_datetimes))
# We need to describe the portal after bind, since the return
# format codes will be different (hopefully, always what we
# requested).
self._send(DescribePortal(portal))
self._send(Flush())
self._flush()
# Read responses from server...
reader = MessageReader(self)
# BindComplete is good -- just ignore
reader.add_message(BindComplete, lambda msg: 0)
# NoData in this case means we're not executing a query. As a
# result, we won't be fetching rows, so we'll never execute the
# portal we just created... unless we execute it right away, which
# we'll do.
reader.add_message(NoData, self._bind_nodata, portal, reader, copy_stream)
# Return the new row desc, since it will have the format types we
# asked the server for
reader.add_message(RowDescription, lambda msg: (msg, None))
return reader.handle_messages()
def _copy_in_response(self, copyin, fileobj, old_reader):
if fileobj == None:
raise CopyQueryWithoutStreamError()
while True:
data = fileobj.read(self._block_size)
if not data:
break
self._send(CopyData(data))
self._flush()
self._send(CopyDone())
self._send(Sync())
self._flush()
def _copy_out_response(self, copyout, fileobj, old_reader):
if fileobj == None:
raise CopyQueryWithoutStreamError()
reader = MessageReader(self)
reader.add_message(CopyData, self._copy_data, fileobj)
reader.add_message(CopyDone, lambda msg: 1)
reader.handle_messages()
def _copy_data(self, copydata, fileobj):
fileobj.write(copydata.data)
def _bind_nodata(self, msg, portal, old_reader, copy_stream):
# Bind message returned NoData, causing us to execute the command.
self._send(Execute(portal, 0))
self._send(Sync())
self._flush()
output = {}
reader = MessageReader(self)
reader.add_message(CopyOutResponse, self._copy_out_response, copy_stream, reader)
reader.add_message(CopyInResponse, self._copy_in_response, copy_stream, reader)
reader.add_message(CommandComplete, lambda msg, out: out.setdefault('msg', msg) and False, output)
reader.add_message(ReadyForQuery, lambda msg: 1)
reader.delay_raising_exception = True
reader.handle_messages()
old_reader.return_value((None, output['msg']))
@sync_on_error
def send_simple_query(self, query_string, copy_stream=None):
"Submit a simple query (PQsendQuery)"
# Only use this for trivial queries, as its use is discouraged because:
# CONS:
# - Parameters are "injected" (they should be escaped by the app)
# - Excessive memory usage (always returns all rows on completion)
# - Inefficient transmission of data in plain text (except for FETCH)
# - No Prepared Statement support, each query is parsed every time
# - Basic implementation: minimal error recovery and type support
# PROS:
# - compact: equivalent to Parse, Bind, Describe, Execute, Close, Sync
# - doesn't return ParseComplete, BindComplete, CloseComplete, NoData
# - it supports multiple statements in a single query string
# - it is available when the Streaming Replication Protocol is active
# NOTE: this is the protocol used by psycopg2
# (they also use named cursors to overcome some drawbacks)
self.verifyState("ready")
if isinstance(query_string, unicode):
query_string = query_string.encode(self._client_encoding)
self._send(SimpleQuery(query_string))
self._flush()
# define local storage for message handlers:
output = {}
rows = []
# create and add handlers for all the possible messages:
reader = MessageReader(self)
# read row description but continue processing messages... (return false)
reader.add_message(RowDescription, lambda msg, out: out.setdefault('row_desc', msg) and False, output)
reader.add_message(DataRow, lambda msg: self._fetch_datarow(msg, rows, output['row_desc']))
reader.add_message(EmptyQueryResponse, lambda msg: False)
reader.add_message(CommandComplete, lambda msg, out: out.setdefault('complete', msg) and False, output)
reader.add_message(CopyInResponse, self._copy_in_response, copy_stream, reader)
reader.add_message(CopyOutResponse, self._copy_out_response, copy_stream, reader)
# messages indicating that we've hit the end of the available data for this command
reader.add_message(ReadyForQuery, lambda msg: 1)
# process all messages and then raise exceptions (if any)
reader.delay_raising_exception = True
# start processing the messages from the backend:
retval = reader.handle_messages()
# return the row description, the command-complete message, and the accumulated rows
return output.get('row_desc'), output.get('complete'), rows
@sync_on_error
def fetch_rows(self, portal, row_count, row_desc):
self.verifyState("ready")
self._send(Execute(portal, row_count))
self._send(Flush())
self._flush()
rows = []
reader = MessageReader(self)
reader.add_message(DataRow, self._fetch_datarow, rows, row_desc)
reader.add_message(PortalSuspended, lambda msg: 1)
reader.add_message(CommandComplete, self._fetch_commandcomplete, portal)
retval = reader.handle_messages()
# retval = 2 when command complete, indicating that we've hit the
# end of the available data for this command
return (retval == 2), rows
def _fetch_datarow(self, msg, rows, row_desc):
rows.append(
[
types.py_value(
msg.fields[i],
row_desc.fields[i],
client_encoding=self._client_encoding,
integer_datetimes=self._integer_datetimes,
)
for i in range(len(msg.fields))
]
)
def _fetch_commandcomplete(self, msg, portal):
self._send(ClosePortal(portal))
self._send(Sync())
self._flush()
reader = MessageReader(self)
reader.add_message(ReadyForQuery, self._fetch_commandcomplete_rfq)
reader.add_message(CloseComplete, lambda msg: False)
reader.handle_messages()
return 2 # signal end-of-data
def _fetch_commandcomplete_rfq(self, msg):
self._state = "ready"
return True
# Send a Sync message, then read and discard all messages until we
# receive a ReadyForQuery message.
def _sync(self):
# it is assumed _sync is called from sync_on_error, which holds
# a _sock_lock throughout the call
self._send(Sync())
self._flush()
reader = MessageReader(self)
reader.ignore_unhandled_messages = True
reader.add_message(ReadyForQuery, lambda msg: True)
reader.handle_messages()
def close_statement(self, statement):
if self._state == "closed":
return
self.verifyState("ready")
self._sock_lock.acquire()
try:
self._send(ClosePreparedStatement(statement))
self._send(Sync())
self._flush()
reader = MessageReader(self)
reader.add_message(CloseComplete, lambda msg: 0)
reader.add_message(ReadyForQuery, lambda msg: 1)
reader.handle_messages()
finally:
self._sock_lock.release()
def close_portal(self, portal):
if self._state == "closed":
return
self.verifyState("ready")
self._sock_lock.acquire()
try:
self._send(ClosePortal(portal))
self._send(Sync())
self._flush()
reader = MessageReader(self)
reader.add_message(CloseComplete, lambda msg: 0)
reader.add_message(ReadyForQuery, lambda msg: 1)
reader.handle_messages()
finally:
self._sock_lock.release()
def close(self):
self._sock_lock.acquire()
try:
self._send(Terminate())
self._flush()
self._sock.close()
self._state = "closed"
finally:
self._sock_lock.release()
def _onParameterStatusReceived(self, msg):
if msg.key == "client_encoding":
self._client_encoding = types.encoding_convert(msg.value)
##print "_onParameterStatusReceived client_encoding", self._client_encoding
elif msg.key == "integer_datetimes":
self._integer_datetimes = (msg.value == "on")
elif msg.key == "server_version":
self._server_version = msg.value
else:
##print "_onParameterStatusReceived ", msg.key, msg.value
pass
def handleNoticeResponse(self, msg):
self.NoticeReceived(msg)
def handleParameterStatus(self, msg):
self.ParameterStatusReceived(msg)
def handleNotificationResponse(self, msg):
self.NotificationReceived(msg)
def fileno(self):
# This should be safe to do without a lock
return self._sock.fileno()
def isready(self):
self._sock_lock.acquire()
try:
rlst, _wlst, _xlst = select.select([self], [], [], 0)
if not rlst:
return False
self._sync()
return True
finally:
self._sock_lock.release()
def server_version(self):
self.verifyState("ready")
if not self._server_version:
raise InterfaceError("Server did not provide server_version parameter.")
return self._server_version
def encoding(self):
return self._client_encoding
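##
# Sketch of the frame format consumed by Connection._read_message(): one
# type byte, a 32-bit length that counts itself, then the payload. The
# bytes below hand-encode ReadyForQuery('I') and are decoded through the
# message_types dispatch table defined next.
def _read_message_framing_sketch():
    import struct
    raw = "Z" + struct.pack("!i", 5) + "I"
    code = raw[0]
    data_len = struct.unpack("!i", raw[1:5])[0] - 4
    msg = message_types[code].createFromData(raw[5:5 + data_len])
    assert isinstance(msg, ReadyForQuery) and msg.status == "I"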
message_types = {
"N": NoticeResponse,
"R": AuthenticationRequest,
"S": ParameterStatus,
"K": BackendKeyData,
"Z": ReadyForQuery,
"T": RowDescription,
"E": ErrorResponse,
"D": DataRow,
"C": CommandComplete,
"1": ParseComplete,
"2": BindComplete,
"3": CloseComplete,
"s": PortalSuspended,
"n": NoData,
"I": EmptyQueryResponse,
"t": ParameterDescription,
"A": NotificationResponse,
"c": CopyDone,
"d": CopyData,
"G": CopyInResponse,
"H": CopyOutResponse,
}
| Python |
class MulticastDelegate(object):
def __init__(self):
self.delegates = []
def __iadd__(self, delegate):
self.add(delegate)
return self
def add(self, delegate):
self.delegates.append(delegate)
def __isub__(self, delegate):
self.delegates.remove(delegate)
return self
def __call__(self, *args, **kwargs):
for d in self.delegates:
d(*args, **kwargs)
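##
# Sketch of the event pattern above: handlers are attached with += and
# detached with -=; calling the delegate invokes every handler in order.
def _multicast_delegate_sketch():
    seen = []
    event = MulticastDelegate()
    event += seen.append
    event("notice")
    assert seen == ["notice"]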
| Python |
# vim: sw=4:expandtab:foldmethod=marker
#
# Copyright (c) 2007-2009, Mathieu Fenniak
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
__author__ = "Mathieu Fenniak"
import datetime
import decimal
import struct
import math
from errors import (NotSupportedError, ArrayDataParseError, InternalError,
ArrayContentEmptyError, ArrayContentNotHomogenousError,
ArrayContentNotSupportedError, ArrayDimensionsNotConsistentError)
try:
from pytz import utc
except ImportError:
ZERO = datetime.timedelta(0)
class UTC(datetime.tzinfo):
def utcoffset(self, dt):
return ZERO
def tzname(self, dt):
return "UTC"
def dst(self, dt):
return ZERO
utc = UTC()
class Bytea(str):
pass
class Interval(object):
def __init__(self, microseconds=0, days=0, months=0):
self.microseconds = microseconds
self.days = days
self.months = months
def _setMicroseconds(self, value):
if not isinstance(value, int) and not isinstance(value, long):
raise TypeError("microseconds must be an int or long")
elif not (min_int8 < value < max_int8):
raise OverflowError("microseconds must be representable as a 64-bit integer")
else:
self._microseconds = value
def _setDays(self, value):
if not isinstance(value, int) and not isinstance(value, long):
raise TypeError("days must be an int or long")
elif not (min_int4 < value < max_int4):
raise OverflowError("days must be representable as a 32-bit integer")
else:
self._days = value
def _setMonths(self, value):
if not isinstance(value, int) and not isinstance(value, long):
raise TypeError("months must be an int or long")
elif not (min_int4 < value < max_int4):
raise OverflowError("months must be representable as a 32-bit integer")
else:
self._months = value
microseconds = property(lambda self: self._microseconds, _setMicroseconds)
days = property(lambda self: self._days, _setDays)
months = property(lambda self: self._months, _setMonths)
def __repr__(self):
return "<Interval %s months %s days %s microseconds>" % (self.months, self.days, self.microseconds)
def __cmp__(self, other):
if other == None: return -1
c = cmp(self.months, other.months)
if c != 0: return c
c = cmp(self.days, other.days)
if c != 0: return c
return cmp(self.microseconds, other.microseconds)
def pg_type_info(typ):
value = None
if isinstance(typ, dict):
value = typ["value"]
typ = typ["type"]
data = py_types.get(typ)
if data == None:
raise NotSupportedError("type %r not mapped to pg type" % typ)
# permit the type data to be determined by the value, if provided
inspect_func = data.get("inspect")
if value != None and inspect_func != None:
data = inspect_func(value)
type_oid = data.get("typeoid")
if type_oid == None:
raise InternalError("type %r has no type_oid" % typ)
elif type_oid == -1:
# special case: NULL values
return type_oid, 0
# prefer bin, but go with whatever exists
if data.get("bin_out"):
format = 1
elif data.get("txt_out"):
format = 0
else:
raise InternalError("no conversion fuction for type %r" % typ)
return type_oid, format
def pg_value(value, fc, **kwargs):
typ = type(value)
data = py_types.get(typ)
if data == None:
raise NotSupportedError("type %r not mapped to pg type" % typ)
# permit the type conversion to be determined by the value, if provided
inspect_func = data.get("inspect")
if value != None and inspect_func != None:
data = inspect_func(value)
# special case: NULL values
if data.get("typeoid") == -1:
return None
if fc == 0:
func = data.get("txt_out")
elif fc == 1:
func = data.get("bin_out")
else:
raise InternalError("unrecognized format code %r" % fc)
if func == None:
raise NotSupportedError("type %r, format code %r not supported" % (typ, fc))
return func(value, **kwargs)
def py_type_info(description):
type_oid = description['type_oid']
data = pg_types.get(type_oid)
if data == None:
raise NotSupportedError("type oid %r not mapped to py type" % type_oid)
# prefer bin, but go with whatever exists
if data.get("bin_in"):
format = 1
elif data.get("txt_in"):
format = 0
else:
raise InternalError("no conversion fuction for type oid %r" % type_oid)
return format
def py_value(v, description, **kwargs):
if v == None:
# special case - NULL value
return None
type_oid = description['type_oid']
format = description['format']
data = pg_types.get(type_oid)
if data == None:
raise NotSupportedError("type oid %r not supported" % type_oid)
if format == 0:
func = data.get("txt_in")
elif format == 1:
func = data.get("bin_in")
else:
raise NotSupportedError("format code %r not supported" % format)
if func == None:
raise NotSupportedError("data response format %r, type %r not supported" % (format, type_oid))
return func(v, **kwargs)
def boolrecv(data, **kwargs):
return data == "\x01"
def boolsend(v, **kwargs):
if v:
return "\x01"
else:
return "\x00"
min_int2, max_int2 = -2 ** 15, 2 ** 15
min_int4, max_int4 = -2 ** 31, 2 ** 31
min_int8, max_int8 = -2 ** 63, 2 ** 63
def int_inspect(value):
if min_int2 < value < max_int2:
return {"typeoid": 21, "bin_out": int2send}
elif min_int4 < value < max_int4:
return {"typeoid": 23, "bin_out": int4send}
elif min_int8 < value < max_int8:
return {"typeoid": 20, "bin_out": int8send}
else:
return {"typeoid": 1700, "bin_out": numeric_send}
def int2recv(data, **kwargs):
return struct.unpack("!h", data)[0]
def int2send(v, **kwargs):
return struct.pack("!h", v)
def int4recv(data, **kwargs):
return struct.unpack("!i", data)[0]
def int4send(v, **kwargs):
return struct.pack("!i", v)
def int8recv(data, **kwargs):
return struct.unpack("!q", data)[0]
def int8send(v, **kwargs):
return struct.pack("!q", v)
def float4recv(data, **kwargs):
return struct.unpack("!f", data)[0]
def float8recv(data, **kwargs):
return struct.unpack("!d", data)[0]
def float8send(v, **kwargs):
return struct.pack("!d", v)
def datetime_inspect(value):
if value.tzinfo != None:
# send as timestamptz if timezone is provided
return {"typeoid": 1184, "bin_out": timestamptz_send}
else:
# otherwise send as timestamp
return {"typeoid": 1114, "bin_out": timestamp_send}
def timestamp_recv(data, integer_datetimes, **kwargs):
if integer_datetimes:
# data is 64-bit integer representing microseconds since 2000-01-01
val = struct.unpack("!q", data)[0]
return datetime.datetime(2000, 1, 1) + datetime.timedelta(microseconds = val)
else:
# data is double-precision float representing seconds since 2000-01-01
val = struct.unpack("!d", data)[0]
return datetime.datetime(2000, 1, 1) + datetime.timedelta(seconds = val)
# return a timezone-aware datetime instance if we're reading from a
# "timestamp with timezone" type. The timezone returned will always be UTC,
# but providing that additional information can permit conversion to local.
def timestamptz_recv(data, **kwargs):
return timestamp_recv(data, **kwargs).replace(tzinfo=utc)
def timestamp_send(v, integer_datetimes, **kwargs):
delta = v - datetime.datetime(2000, 1, 1)
val = delta.microseconds + (delta.seconds * 1000000) + (delta.days * 86400000000)
if integer_datetimes:
# data is 64-bit integer representing microseconds since 2000-01-01
return struct.pack("!q", val)
else:
# data is double-precision float representing seconds since 2000-01-01
return struct.pack("!d", val / 1000.0 / 1000.0)
def timestamptz_send(v, **kwargs):
# timestamps should be sent as UTC. If they have zone info,
# convert them.
return timestamp_send(v.astimezone(utc).replace(tzinfo=None), **kwargs)
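##
# Sketch round trip through the 2000-01-01 epoch conversion implemented
# by timestamp_send/timestamp_recv, using the integer_datetimes format.
def _timestamp_roundtrip_sketch():
    v = datetime.datetime(2009, 3, 1, 12, 30, 0)
    wire = timestamp_send(v, integer_datetimes=True)
    assert timestamp_recv(wire, integer_datetimes=True) == v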
def date_in(data, **kwargs):
year = int(data[0:4])
month = int(data[5:7])
day = int(data[8:10])
return datetime.date(year, month, day)
def date_out(v, **kwargs):
return v.isoformat()
def time_in(data, **kwargs):
hour = int(data[0:2])
minute = int(data[3:5])
sec = decimal.Decimal(data[6:])
return datetime.time(hour, minute, int(sec), int((sec - int(sec)) * 1000000))
def time_out(v, **kwargs):
return v.isoformat()
def numeric_in(data, **kwargs):
if data.find(".") == -1:
return int(data)
else:
return decimal.Decimal(data)
def numeric_recv(data, **kwargs):
num_digits, weight, sign, scale = struct.unpack("!hhhh", data[:8])
data = data[8:]
digits = struct.unpack("!" + ("h" * num_digits), data)
weight = decimal.Decimal(weight)
retval = 0
for d in digits:
d = decimal.Decimal(d)
retval += d * (10000 ** weight)
weight -= 1
if sign:
retval *= -1
return retval
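##
# Sketch of the base-10000 layout decoded by numeric_recv above: each
# 16-bit "digit" is a base-10000 digit and weight is the exponent of the
# first one. The buffer below hand-encodes 12345.678.
def _numeric_recv_sketch():
    header = struct.pack("!hhhh", 3, 1, 0, 3)   # ndigits, weight, sign, dscale
    digits = struct.pack("!3h", 1, 2345, 6780)  # 1*10000^1 + 2345 + 6780*10000^-1
    assert numeric_recv(header + digits) == decimal.Decimal("12345.678")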
DEC_DIGITS = 4
def numeric_send(d, **kwargs):
# This is a very straight port of src/backend/utils/adt/numeric.c set_var_from_str()
s = str(d)
pos = 0
sign = 0
if s[0] == '-':
sign = 0x4000 # NEG
pos=1
elif s[0] == '+':
sign = 0 # POS
pos=1
have_dp = False
decdigits = [0, 0, 0, 0]
dweight = -1
dscale = 0
for char in s[pos:]:
if char.isdigit():
decdigits.append(int(char))
if not have_dp:
dweight += 1
else:
dscale += 1
pos+=1
elif char == '.':
have_dp = True
pos+=1
else:
break
if len(s) > pos:
char = s[pos]
if char == 'e' or char == 'E':
pos+=1
exponent = int(s[pos:])
dweight += exponent
dscale -= exponent
if dscale < 0: dscale = 0
if dweight >= 0:
weight = (dweight + 1 + DEC_DIGITS - 1) / DEC_DIGITS - 1
else:
weight = -((-dweight - 1) / DEC_DIGITS + 1)
offset = (weight + 1) * DEC_DIGITS - (dweight + 1)
ndigits = (len(decdigits)-DEC_DIGITS + offset + DEC_DIGITS - 1) / DEC_DIGITS
i = DEC_DIGITS - offset
decdigits.extend([0, 0, 0])
ndigits_ = ndigits
digits = ''
while ndigits_ > 0:
# ifdef DEC_DIGITS == 4
digits += struct.pack("!h", ((decdigits[i] * 10 + decdigits[i + 1]) * 10 + decdigits[i + 2]) * 10 + decdigits[i + 3])
ndigits_ -= 1
i += DEC_DIGITS
# strip_var()
if ndigits == 0:
sign = 0 # POS (matches set_var_from_str/strip_var, which treat zero as non-negative)
weight = 0
# ----------
retval = struct.pack("!hhhh", ndigits, weight, sign, dscale) + digits
return retval
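##
# Sketch round trip through the binary NUMERIC codec defined above.
def _numeric_roundtrip_sketch():
    v = decimal.Decimal("-1234.5")
    assert numeric_recv(numeric_send(v)) == v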
def numeric_out(v, **kwargs):
return str(v)
# PostgreSQL encodings:
# http://www.postgresql.org/docs/8.3/interactive/multibyte.html
# Python encodings:
# http://www.python.org/doc/2.4/lib/standard-encodings.html
#
# Commented out encodings don't require a name change between PostgreSQL and
# Python. If the py side is None, then the encoding isn't supported.
pg_to_py_encodings = {
# Not supported:
"mule_internal": None,
"euc_tw": None,
# Name fine as-is:
#"euc_jp",
#"euc_jis_2004",
#"euc_kr",
#"gb18030",
#"gbk",
#"johab",
#"sjis",
#"shift_jis_2004",
#"uhc",
#"utf8",
# Different name:
"euc_cn": "gb2312",
"iso_8859_5": "is8859_5",
"iso_8859_6": "is8859_6",
"iso_8859_7": "is8859_7",
"iso_8859_8": "is8859_8",
"koi8": "koi8_r",
"latin1": "iso8859-1",
"latin2": "iso8859_2",
"latin3": "iso8859_3",
"latin4": "iso8859_4",
"latin5": "iso8859_9",
"latin6": "iso8859_10",
"latin7": "iso8859_13",
"latin8": "iso8859_14",
"latin9": "iso8859_15",
"sql_ascii": "ascii",
"win866": "cp886",
"win874": "cp874",
"win1250": "cp1250",
"win1251": "cp1251",
"win1252": "cp1252",
"win1253": "cp1253",
"win1254": "cp1254",
"win1255": "cp1255",
"win1256": "cp1256",
"win1257": "cp1257",
"win1258": "cp1258",
}
def encoding_convert(encoding):
return pg_to_py_encodings.get(encoding.lower(), encoding)
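##
# Sketch of the name mapping applied by encoding_convert above.
def _encoding_convert_sketch():
    assert encoding_convert("LATIN1") == "iso8859-1"  # renamed for Python
    assert encoding_convert("utf8") == "utf8"         # passed through unchanged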
def varcharin(data, client_encoding, **kwargs):
return unicode(data, encoding_convert(client_encoding))
def textout(v, client_encoding, **kwargs):
if isinstance(v, unicode):
return v.encode(encoding_convert(client_encoding))
else:
return v
def byteasend(v, **kwargs):
return str(v)
def bytearecv(data, **kwargs):
return Bytea(data)
# interval support does not provide a Python-usable interval object yet
def interval_recv(data, integer_datetimes, **kwargs):
if integer_datetimes:
microseconds, days, months = struct.unpack("!qii", data)
else:
seconds, days, months = struct.unpack("!dii", data)
microseconds = int(seconds * 1000 * 1000)
return Interval(microseconds, days, months)
def interval_send(data, integer_datetimes, **kwargs):
if integer_datetimes:
return struct.pack("!qii", data.microseconds, data.days, data.months)
else:
return struct.pack("!dii", data.microseconds / 1000.0 / 1000.0, data.days, data.months)
def array_recv(data, **kwargs):
dim, hasnull, typeoid = struct.unpack("!iii", data[:12])
data = data[12:]
# get type conversion method for typeoid
conversion = pg_types[typeoid]["bin_in"]
# Read dimension info
dim_lengths = []
element_count = 1
for idim in range(dim):
dim_len, dim_lbound = struct.unpack("!ii", data[:8])
data = data[8:]
dim_lengths.append(dim_len)
element_count *= dim_len
# Read all array values
array_values = []
for i in range(element_count):
if len(data):
element_len, = struct.unpack("!i", data[:4])
data = data[4:]
if element_len == -1:
array_values.append(None)
else:
array_values.append(conversion(data[:element_len], **kwargs))
data = data[element_len:]
if data != "":
raise ArrayDataParseError("unexpected data left over after array read")
# at this point, {{1,2,3},{4,5,6}}::int[][] looks like [1,2,3,4,5,6].
# go through the dimensions and fix up the array contents to match
# expected dimensions
for dim_length in reversed(dim_lengths[1:]):
val = []
while array_values:
val.append(array_values[:dim_length])
array_values = array_values[dim_length:]
array_values = val
return array_values
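##
# Sketch of the binary array layout decoded by array_recv above: the
# buffer hand-encodes the 2x2 INT4[] value {{1,2},{3,4}}, exercising the
# dimension fix-up at the end of the function.
def _array_recv_sketch():
    data = struct.pack("!iii", 2, 0, 23)   # 2 dimensions, no NULLs, INT4
    data += struct.pack("!ii", 2, 1) * 2   # per dimension: length 2, lower bound 1
    for v in (1, 2, 3, 4):
        data += struct.pack("!i", 4) + struct.pack("!i", v)
    assert array_recv(data) == [[1, 2], [3, 4]]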
def array_inspect(value):
# Check if array has any values. If not, we can't determine the proper
# array typeoid.
first_element = array_find_first_element(value)
if first_element == None:
raise ArrayContentEmptyError("array has no values")
# supported array output
typ = type(first_element)
if issubclass(typ, int) or issubclass(typ, long):
# special int array support -- send as smallest possible array type
special_int_support = True
int2_ok, int4_ok, int8_ok = True, True, True
for v in array_flatten(value):
if v == None:
continue
if min_int2 < v < max_int2:
continue
int2_ok = False
if min_int4 < v < max_int4:
continue
int4_ok = False
if min_int8 < v < max_int8:
continue
int8_ok = False
if int2_ok:
array_typeoid = 1005 # INT2[]
elif int4_ok:
array_typeoid = 1007 # INT4[]
elif int8_ok:
array_typeoid = 1016 # INT8[]
else:
raise ArrayContentNotSupportedError("numeric not supported as array contents")
else:
special_int_support = False
array_typeoid = py_array_types.get(typ)
if array_typeoid == None:
raise ArrayContentNotSupportedError("type %r not supported as array contents" % typ)
# check for homogenous array
for v in array_flatten(value):
if v != None and not (isinstance(v, typ) or (typ == long and isinstance(v, int)) or (typ == int and isinstance(v, long))):
raise ArrayContentNotHomogenousError("not all array elements are of type %r" % typ)
# check that all array dimensions are consistent
array_check_dimensions(value)
type_data = py_types[typ]
if special_int_support:
if array_typeoid == 1005:
type_data = {"typeoid": 21, "bin_out": int2send}
elif array_typeoid == 1007:
type_data = {"typeoid": 23, "bin_out": int4send}
elif array_typeoid == 1016:
type_data = {"typeoid": 20, "bin_out": int8send}
else:
type_data = py_types[typ]
return {
"typeoid": array_typeoid,
"bin_out": array_send(type_data["typeoid"], type_data["bin_out"])
}
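##
# Sketch of the array typeoid selection performed by array_inspect above.
def _array_inspect_sketch():
    assert array_inspect([1, 2, 3])["typeoid"] == 1005       # INT2[]
    assert array_inspect([[1.5], [2.5]])["typeoid"] == 1022  # FLOAT8[]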
def array_find_first_element(arr):
for v in array_flatten(arr):
if v != None:
return v
return None
def array_flatten(arr):
for v in arr:
if isinstance(v, list):
for v2 in array_flatten(v):
yield v2
else:
yield v
def array_check_dimensions(arr):
v0 = arr[0]
if isinstance(v0, list):
req_len = len(v0)
req_inner_lengths = array_check_dimensions(v0)
for v in arr:
inner_lengths = array_check_dimensions(v)
if len(v) != req_len or inner_lengths != req_inner_lengths:
raise ArrayDimensionsNotConsistentError("array dimensions not consistent")
retval = [req_len]
retval.extend(req_inner_lengths)
return retval
else:
# make sure nothing else at this level is a list
for v in arr:
if isinstance(v, list):
raise ArrayDimensionsNotConsistentError("array dimensions not consistent")
return []
def array_has_null(arr):
for v in array_flatten(arr):
if v == None:
return True
return False
def array_dim_lengths(arr):
v0 = arr[0]
if isinstance(v0, list):
retval = [len(arr)]  # outermost dimension first
retval.extend(array_dim_lengths(v0))
else:
return [len(arr)]
return retval
class array_send(object):
def __init__(self, typeoid, bin_out_func):
self.typeoid = typeoid
self.bin_out_func = bin_out_func
def __call__(self, arr, **kwargs):
has_null = array_has_null(arr)
dim_lengths = array_dim_lengths(arr)
data = struct.pack("!iii", len(dim_lengths), has_null, self.typeoid)
for i in dim_lengths:
data += struct.pack("!ii", i, 1)
for v in array_flatten(arr):
if v == None:
data += struct.pack("!i", -1)
else:
inner_data = self.bin_out_func(v, **kwargs)
data += struct.pack("!i", len(inner_data))
data += inner_data
return data
py_types = {
bool: {"typeoid": 16, "bin_out": boolsend},
int: {"inspect": int_inspect},
long: {"inspect": int_inspect},
str: {"typeoid": 25, "bin_out": textout},
unicode: {"typeoid": 25, "bin_out": textout},
float: {"typeoid": 701, "bin_out": float8send},
decimal.Decimal: {"typeoid": 1700, "bin_out": numeric_send},
Bytea: {"typeoid": 17, "bin_out": byteasend},
datetime.datetime: {"typeoid": 1114, "bin_out": timestamp_send, "inspect": datetime_inspect},
datetime.date: {"typeoid": 1082, "txt_out": date_out},
datetime.time: {"typeoid": 1083, "txt_out": time_out},
Interval: {"typeoid": 1186, "bin_out": interval_send},
type(None): {"typeoid": -1},
list: {"inspect": array_inspect},
}
# py type -> pg array typeoid
py_array_types = {
float: 1022,
bool: 1000,
str: 1009, # TEXT[]
unicode: 1009, # TEXT[]
decimal.Decimal: 1231, # NUMERIC[]
}
pg_types = {
16: {"bin_in": boolrecv},
17: {"bin_in": bytearecv},
19: {"bin_in": varcharin}, # name type
20: {"bin_in": int8recv},
21: {"bin_in": int2recv},
23: {"bin_in": int4recv, "txt_in": numeric_in},
25: {"bin_in": varcharin, "txt_in": varcharin}, # TEXT type
26: {"txt_in": numeric_in}, # oid type
142: {"bin_in": varcharin, "txt_in": varcharin}, # XML
194: {"bin_in": varcharin}, # "string representing an internal node tree"
700: {"bin_in": float4recv},
701: {"bin_in": float8recv},
705: {"txt_in": varcharin}, # UNKNOWN
829: {"txt_in": varcharin}, # MACADDR type
1000: {"bin_in": array_recv}, # BOOL[]
1003: {"bin_in": array_recv}, # NAME[]
1005: {"bin_in": array_recv}, # INT2[]
1007: {"bin_in": array_recv, "txt_in": varcharin}, # INT4[]
1009: {"bin_in": array_recv}, # TEXT[]
1014: {"bin_in": array_recv}, # CHAR[]
1015: {"bin_in": array_recv}, # VARCHAR[]
1016: {"bin_in": array_recv}, # INT8[]
1021: {"bin_in": array_recv}, # FLOAT4[]
1022: {"bin_in": array_recv}, # FLOAT8[]
1042: {"bin_in": varcharin}, # CHAR type
1043: {"bin_in": varcharin}, # VARCHAR type
1082: {"txt_in": date_in},
1083: {"txt_in": time_in},
1114: {"bin_in": timestamp_recv},
1184: {"bin_in": timestamptz_recv}, # timestamp w/ tz
1186: {"bin_in": interval_recv},
1231: {"bin_in": array_recv}, # NUMERIC[]
1263: {"bin_in": array_recv}, # cstring[]
1700: {"bin_in": numeric_recv},
2275: {"bin_in": varcharin}, # cstring
}
| Python |
# vim: sw=4:expandtab:foldmethod=marker
#
# Copyright (c) 2007-2009, Mathieu Fenniak
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
__author__ = "Mathieu Fenniak"
import dbapi as DBAPI
pg8000_dbapi = DBAPI
from interface import *
from types import Bytea
| Python |
# vim: sw=4:expandtab:foldmethod=marker
#
# Copyright (c) 2007-2009, Mathieu Fenniak
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
__author__ = "Mathieu Fenniak"
import socket
import protocol
import threading
from errors import *
def conninfo_parse(conninfo):
"Conninfo parser routine based on libpq conninfo_parse"
options = {}
buf = conninfo + " "
tmp = pname = ""
quoted_string = False
cp = 0
while cp < len(buf):
# Skip blanks before the parameter name
c = buf[cp]
if c.isspace() and tmp and not quoted_string and pname:
options[pname] = tmp
tmp = pname = ""
elif c == "'":
quoted_string = not quoted_string
elif c == '\\':
cp += 1
tmp += buf[cp]
elif c == "=":
if not tmp:
raise RuntimeError("missing parameter name (conninfo:%s)" % cp)
pname = tmp
tmp = ""
elif not c.isspace() or quoted_string:
tmp += c
cp += 1
if quoted_string:
raise RuntimeError("unterminated quoted string (conninfo:%s)" % cp)
return options
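##
# Sketch of the libpq-style strings accepted by conninfo_parse above;
# the connection parameters are illustrative values.
def _conninfo_parse_sketch():
    opts = conninfo_parse("host=localhost port=5432 dbname='my db'")
    assert opts == {"host": "localhost", "port": "5432", "dbname": "my db"}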
class DataIterator(object):
def __init__(self, obj, func):
self.obj = obj
self.func = func
def __iter__(self):
return self
def next(self):
retval = self.func(self.obj)
if retval == None:
raise StopIteration()
return retval
statement_number_lock = threading.Lock()
statement_number = 0
##
# This class represents a prepared statement. A prepared statement is
# pre-parsed on the server, which reduces the need to parse the query every
# time it is run. The statement can have parameters in the form of $1, $2, $3,
# etc. When parameters are used, the types of the parameters need to be
# specified when creating the prepared statement.
# <p>
# As of v1.01, instances of this class are thread-safe. This means that a
# single PreparedStatement can be accessed by multiple threads without the
# internal consistency of the statement being altered. However, the
# responsibility is on the client application to ensure that one thread reading
# from a statement isn't affected by another thread starting a new query with
# the same statement.
# <p>
# Stability: Added in v1.00, stability guaranteed for v1.xx.
#
# @param connection An instance of {@link Connection Connection}.
#
# @param statement The SQL statement to be represented, often containing
# parameters in the form of $1, $2, $3, etc.
#
# @param types Python type objects for each parameter in the SQL
# statement. For example, int, float, str.
class PreparedStatement(object):
##
# Determines the number of rows to read from the database server at once.
# Reading more rows increases performance at the cost of memory. The
# default value is 100 rows. The effect of this parameter is transparent:
# the library automatically reads more rows when the cache is empty.
# <p>
# Stability: Added in v1.00, stability guaranteed for v1.xx. It is
# possible that implementation changes in the future could cause this
# parameter to be ignored.
row_cache_size = 100
def __init__(self, connection, statement, *types, **kwargs):
global statement_number
if connection == None or connection.c == None:
raise InterfaceError("connection not provided")
try:
statement_number_lock.acquire()
self._statement_number = statement_number
statement_number += 1
finally:
statement_number_lock.release()
self.c = connection.c
self._portal_name = None
self._statement_name = kwargs.get("statement_name", "pg8000_statement_%s" % self._statement_number)
self._row_desc = None
self._cached_rows = []
self._ongoing_row_count = 0
self._command_complete = True
self._parse_row_desc = self.c.parse(self._statement_name, statement, types)
self._lock = threading.RLock()
def close(self):
if self._statement_name != "": # don't close unnamed statement
self.c.close_statement(self._statement_name)
if self._portal_name != None:
self.c.close_portal(self._portal_name)
self._portal_name = None
row_description = property(lambda self: self._getRowDescription())
def _getRowDescription(self):
if self._row_desc == None:
return None
return self._row_desc.fields
##
# Run the SQL prepared statement with the given parameters.
# <p>
# Stability: Added in v1.00, stability guaranteed for v1.xx.
def execute(self, *args, **kwargs):
self._lock.acquire()
try:
if not self._command_complete:
# cleanup last execute
self._cached_rows = []
self._ongoing_row_count = 0
if self._portal_name != None:
self.c.close_portal(self._portal_name)
self._command_complete = False
self._portal_name = "pg8000_portal_%s" % self._statement_number
self._row_desc, cmd = self.c.bind(self._portal_name, self._statement_name, args, self._parse_row_desc, kwargs.get("stream"))
if self._row_desc:
# We execute our cursor right away to fill up our cache. This
# prevents the cursor from being destroyed, apparently, by a rogue
# Sync between Bind and Execute. Since it is quite likely that
# data will be read from us right away anyway, this seems a safe
# move for now.
self._fill_cache()
else:
self._command_complete = True
self._ongoing_row_count = -1
if cmd != None and cmd.rows != None:
self._ongoing_row_count = cmd.rows
finally:
self._lock.release()
def _fill_cache(self):
self._lock.acquire()
try:
if self._cached_rows:
raise InternalError("attempt to fill cache that isn't empty")
end_of_data, rows = self.c.fetch_rows(self._portal_name, self.row_cache_size, self._row_desc)
self._cached_rows = rows
if end_of_data:
self._command_complete = True
finally:
self._lock.release()
def _fetch(self):
if not self._row_desc:
raise ProgrammingError("no result set")
self._lock.acquire()
try:
if not self._cached_rows:
if self._command_complete:
return None
self._fill_cache()
if self._command_complete and not self._cached_rows:
# fill cache tells us the command is complete, yet we have
# no rows after filling our cache. This is a special case when
# a query returns no rows.
return None
row = self._cached_rows.pop(0)
self._ongoing_row_count += 1
return tuple(row)
finally:
self._lock.release()
##
# Return a count of the number of rows relevant to the executed statement.
# For a SELECT, this is the number of rows returned. For UPDATE or DELETE,
# this the number of rows affected. For INSERT, the number of rows
# inserted. This property may have a value of -1 to indicate that there
# was no row count.
# <p>
# During a result-set query (eg. SELECT, or INSERT ... RETURNING ...),
# accessing this property requires reading the entire result-set into
# memory, as reading the data to completion is the only way to determine
# the total number of rows. Avoid using this property with
# result-set queries, as it may cause unexpected memory usage.
# <p>
# Stability: Added in v1.03, stability guaranteed for v1.xx.
row_count = property(lambda self: self._get_row_count())
def _get_row_count(self):
self._lock.acquire()
try:
if not self._command_complete:
end_of_data, rows = self.c.fetch_rows(self._portal_name, 0, self._row_desc)
self._cached_rows += rows
if end_of_data:
self._command_complete = True
else:
raise InternalError("fetch_rows(0) did not hit end of data")
return self._ongoing_row_count + len(self._cached_rows)
finally:
self._lock.release()
##
# Read a row from the database server, and return it in a dictionary
# indexed by column name/alias. This method will raise an error if two
# columns have the same name. Returns None after the last row.
# <p>
# Stability: Added in v1.00, stability guaranteed for v1.xx.
def read_dict(self):
row = self._fetch()
if row == None:
return row
retval = {}
for i in range(len(self._row_desc.fields)):
col_name = self._row_desc.fields[i]['name']
if col_name in retval:
raise InterfaceError("cannot return dict of row when two columns have the same name (%r)" % (col_name,))
retval[col_name] = row[i]
return retval
##
# Read a row from the database server, and return it as a tuple of values.
# Returns None after the last row.
# <p>
# Stability: Added in v1.00, stability guaranteed for v1.xx.
def read_tuple(self):
return self._fetch()
##
# Return an iterator for the output of this statement. The iterator will
# return a tuple for each row, in the same manner as {@link
# #PreparedStatement.read_tuple read_tuple}.
# <p>
# Stability: Added in v1.00, stability guaranteed for v1.xx.
def iterate_tuple(self):
return DataIterator(self, PreparedStatement.read_tuple)
##
# Return an iterator for the output of this statement. The iterator will
# return a dict for each row, in the same manner as {@link
# #PreparedStatement.read_dict read_dict}.
# <p>
# Stability: Added in v1.00, stability guaranteed for v1.xx.
def iterate_dict(self):
return DataIterator(self, PreparedStatement.read_dict)
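# Example usage (an illustrative sketch; `db` is an assumed open Connection):
#   stmt = PreparedStatement(db, "SELECT * FROM users WHERE user_id = $1", int)
#   stmt.execute(42)
#   for row in stmt.iterate_dict():
#       print row["user_id"]
#   stmt.close()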
class SimpleStatement(PreparedStatement):
"Internal wrapper to Simple Query protocol emulating a PreparedStatement"
# This should be used internally only for trivial queries
# (not a true Prepared Statement, in fact it can have multiple statements)
# See Simple Query Protocol limitations and trade-offs (send_simple_query)
row_cache_size = None
def __init__(self, connection, statement):
if connection == None or connection.c == None:
raise InterfaceError("connection not provided")
self.c = connection.c
self._row_desc = None
self._cached_rows = []
self._ongoing_row_count = -1
self._command_complete = True
self.statement = statement
self._lock = threading.RLock()
def close(self):
# simple query doesn't have portals
pass
def execute(self, *args, **kwargs):
"Run the SQL simple query stataments"
self._lock.acquire()
try:
self._row_desc, cmd_complete, self._cached_rows = \
self.c.send_simple_query(self.statement, kwargs.get("stream"))
self._command_complete = True
self._ongoing_row_count = -1
if cmd_complete is not None and cmd_complete.rows is not None:
self._ongoing_row_count = cmd_complete.rows
finally:
self._lock.release()
def _fill_cache(self):
# data rows are already fetched in _cached_rows
pass
def _fetch(self):
if not self._row_desc:
raise ProgrammingError("no result set")
self._lock.acquire()
try:
if not self._cached_rows:
return None
row = self._cached_rows.pop(0)
return tuple(row)
finally:
self._lock.release()
def _get_row_count(self):
return self._ongoing_row_count
##
# The Cursor class allows multiple queries to be performed concurrently with a
# single PostgreSQL connection. The Cursor object is implemented internally by
# using a {@link PreparedStatement PreparedStatement} object, so if you plan to
# use a statement multiple times, you might as well create a PreparedStatement
# and save a small amount of reparsing time.
# <p>
# As of v1.01, instances of this class are thread-safe. See {@link
# PreparedStatement PreparedStatement} for more information.
# <p>
# Stability: Added in v1.00, stability guaranteed for v1.xx.
#
# @param connection An instance of {@link Connection Connection}.
class Cursor(object):
def __init__(self, connection):
self.connection = connection
self._stmt = None
def require_stmt(func):
def retval(self, *args, **kwargs):
if self._stmt == None:
raise ProgrammingError("attempting to use unexecuted cursor")
return func(self, *args, **kwargs)
return retval
row_description = property(lambda self: self._getRowDescription())
def _getRowDescription(self):
if self._stmt == None:
return None
return self._stmt.row_description
##
# Run an SQL statement using this cursor. The SQL statement can have
# parameters in the form of $1, $2, $3, etc., which will be filled in by
# the additional arguments passed to this function.
# <p>
# Stability: Added in v1.00, stability guaranteed for v1.xx.
# @param query The SQL statement to execute.
def execute(self, query, *args, **kwargs):
if self.connection.is_closed:
raise ConnectionClosedError()
self.connection._unnamed_prepared_statement_lock.acquire()
try:
if kwargs.get("simple_query"):
# no arguments and no statement name,
# use PostgreSQL Simple Query Protocol
## print "SimpleQuery:", query
self._stmt = SimpleStatement(self.connection, query)
else:
# use PostgreSQL Extended Query Protocol
self._stmt = PreparedStatement(self.connection, query, statement_name="", *[{"type": type(x), "value": x} for x in args])
self._stmt.execute(*args, **kwargs)
finally:
self.connection._unnamed_prepared_statement_lock.release()
##
# Return a count of the number of rows currently being read. If possible,
# please avoid using this function. It requires reading the entire result
# set from the database to determine the number of rows being returned.
# <p>
# Stability: Added in v1.03, stability guaranteed for v1.xx.
# Implementation currently requires caching entire result set into memory,
# avoid using this property.
row_count = property(lambda self: self._get_row_count())
@require_stmt
def _get_row_count(self):
return self._stmt.row_count
##
# Read a row from the database server, and return it in a dictionary
# indexed by column name/alias. This method will raise an error if two
# columns have the same name. Returns None after the last row.
# <p>
# Stability: Added in v1.00, stability guaranteed for v1.xx.
@require_stmt
def read_dict(self):
return self._stmt.read_dict()
##
# Read a row from the database server, and return it as a tuple of values.
# Returns None after the last row.
# <p>
# Stability: Added in v1.00, stability guaranteed for v1.xx.
@require_stmt
def read_tuple(self):
return self._stmt.read_tuple()
##
# Return an iterator for the output of this statement. The iterator will
# return a tuple for each row, in the same manner as {@link
# #PreparedStatement.read_tuple read_tuple}.
# <p>
# Stability: Added in v1.00, stability guaranteed for v1.xx.
@require_stmt
def iterate_tuple(self):
return self._stmt.iterate_tuple()
##
# Return an iterator for the output of this statement. The iterator will
# return a dict for each row, in the same manner as {@link
# #PreparedStatement.read_dict read_dict}.
# <p>
# Stability: Added in v1.00, stability guaranteed for v1.xx.
@require_stmt
def iterate_dict(self):
return self._stmt.iterate_dict()
def close(self):
if self._stmt != None:
self._stmt.close()
self._stmt = None
##
# Return the fileno of the underlying socket for this cursor's connection.
# <p>
# Stability: Added in v1.07, stability guaranteed for v1.xx.
def fileno(self):
return self.connection.fileno()
##
# Poll the underlying socket for this cursor and sync if there is data waiting
# to be read. This has the effect of flushing asynchronous messages from the
# backend. Returns True if messages were read, False otherwise.
# <p>
# Stability: Added in v1.07, stability guaranteed for v1.xx.
def isready(self):
return self.connection.isready()
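# Example usage (a sketch; assumes an open Connection `conn`):
#   cursor = Cursor(conn)
#   cursor.execute("SELECT name FROM products WHERE price < $1", 10)
#   for name, in cursor.iterate_tuple():
#       print name
#   cursor.close()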
##
# This class represents a connection to a PostgreSQL database.
# <p>
# The database connection is derived from the {@link #Cursor Cursor} class,
# which provides a default cursor for running queries. It also provides
# transaction control via the 'begin', 'commit', and 'rollback' methods.
# Without beginning a transaction explicitly, all statements will autocommit to
# the database.
# <p>
# As of v1.01, instances of this class are thread-safe. See {@link
# PreparedStatement PreparedStatement} for more information.
# <p>
# Stability: Added in v1.00, stability guaranteed for v1.xx.
#
# @param user The username to connect to the PostgreSQL server with. This
# parameter is required.
#
# @keyparam host The hostname of the PostgreSQL server to connect with.
# Providing this parameter is necessary for TCP/IP connections. One of either
# host, or unix_sock, must be provided.
#
# @keyparam unix_sock The path to the UNIX socket to access the database
# through, for example, '/tmp/.s.PGSQL.5432'. One of either unix_sock or host
# must be provided. The port parameter will have no affect if unix_sock is
# provided.
#
# @keyparam port The TCP/IP port of the PostgreSQL server instance. This
# parameter defaults to 5432, the registered and common port of PostgreSQL
# TCP/IP servers.
#
# @keyparam database The name of the database instance to connect with. This
# parameter is optional, if omitted the PostgreSQL server will assume the
# database name is the same as the username.
#
# @keyparam password The user password to connect to the server with. This
# parameter is optional. If omitted, and the database server requests password
# based authentication, the connection will fail. On the other hand, if this
# parameter is provided and the database does not request password
# authentication, then the password will not be used.
#
# @keyparam socket_timeout Socket connect timeout measured in seconds.
# Defaults to 60 seconds.
#
# @keyparam ssl Use SSL encryption for TCP/IP socket. Defaults to False.
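# Example usage (an illustrative sketch; the parameter values are assumptions):
#   conn = Connection(user="postgres", host="localhost",
#                     database="mydb", password="secret")
#   conn.begin()
#   conn.execute("UPDATE t SET v = $1 WHERE k = $2", "one", 1)
#   conn.commit()
#   conn.close()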
class Connection(Cursor):
def __init__(self, dsn="", user=None, host=None, unix_sock=None, port=5432, database=None, password=None, socket_timeout=60, ssl=False):
self._row_desc = None
if dsn:
# update connection parameters parsed from the conninfo dsn
opts = conninfo_parse(dsn)
database = opts.get("dbname", database)
user = opts.get("user", user)
password = opts.get("password", password)
host = opts.get("host", host)
port = int(opts.get("port", port))
ssl = opts.get("sslmode", 'disable') != 'disable'
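# a libpq-style conninfo string looks like, for example:
# "host=localhost port=5432 dbname=test user=bob password=secret sslmode=require"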
try:
self.c = protocol.Connection(unix_sock=unix_sock, host=host, port=port, socket_timeout=socket_timeout, ssl=ssl)
self.c.authenticate(user, password=password, database=database)
except socket.error, e:
raise InterfaceError("communication error", e)
Cursor.__init__(self, self)
self._begin = PreparedStatement(self, "BEGIN TRANSACTION")
self._commit = PreparedStatement(self, "COMMIT TRANSACTION")
self._rollback = PreparedStatement(self, "ROLLBACK TRANSACTION")
self._unnamed_prepared_statement_lock = threading.RLock()
self.in_transaction = False
self.autocommit = False
##
# An event handler that is fired when NOTIFY occurs for a notification that
# has been LISTEN'd for. The value of this property is a
# util.MulticastDelegate. A callback can be added by using
# connection.NotificationReceived += SomeMethod. The method will be called
# with a single argument, an object that has properties: backend_pid,
# condition, and additional_info. Callbacks can be removed with the -=
# operator.
# <p>
# Stability: Added in v1.03, stability guaranteed for v1.xx.
NotificationReceived = property(
lambda self: getattr(self.c, "NotificationReceived"),
lambda self, value: setattr(self.c, "NotificationReceived", value)
)
##
# An event handler that is fired when the database server issues a notice.
# The value of this property is a util.MulticastDelegate. A callback can
# be added by using connection.NoticeReceived += SomeMethod. The
# method will be called with a single argument, an object that has
# properties: severity, code, msg, and possibly others (detail, hint,
# position, where, file, line, and routine). Callbacks can be removed with
# the -= operator.
# <p>
# Stability: Added in v1.03, stability guaranteed for v1.xx.
NoticeReceived = property(
lambda self: getattr(self.c, "NoticeReceived"),
lambda self, value: setattr(self.c, "NoticeReceived", value)
)
##
# An event handler that is fired when a runtime configuration option is
# changed on the server. The value of this property is a
# util.MulticastDelegate. A callback can be added by using
# connection.ParameterStatusReceived += SomeMethod. Callbacks can be removed
# with the -= operator. The method will be called with a single argument,
# an object that has properties "key" and "value".
# <p>
# Stability: Added in v1.03, stability guaranteed for v1.xx.
ParameterStatusReceived = property(
lambda self: getattr(self.c, "ParameterStatusReceived"),
lambda self, value: setattr(self.c, "ParameterStatusReceived", value)
)
##
# Begins a new transaction.
# <p>
# Stability: Added in v1.00, stability guaranteed for v1.xx.
def begin(self):
if self.is_closed:
raise ConnectionClosedError()
if self.autocommit:
return
self._begin.execute()
self.in_transaction = True
##
# Commits the running transaction.
# <p>
# Stability: Added in v1.00, stability guaranteed for v1.xx.
def commit(self):
if self.is_closed:
raise ConnectionClosedError()
self._commit.execute()
self.in_transaction = False
##
# Rolls back the running transaction.
# <p>
# Stability: Added in v1.00, stability guaranteed for v1.xx.
def rollback(self):
if self.is_closed:
raise ConnectionClosedError()
self._rollback.execute()
self.in_transaction = False
##
# Closes an open connection.
def close(self):
if self.is_closed:
raise ConnectionClosedError()
self.c.close()
self.c = None
is_closed = property(lambda self: self.c == None)
##
# Return the fileno of the underlying socket for this connection.
# <p>
# Stability: Added in v1.07, stability guaranteed for v1.xx.
def fileno(self):
return self.c.fileno()
##
# Poll the underlying socket for this connection and sync if there is data
# waiting to be read. This has the effect of flushing asynchronous
# messages from the backend. Returns True if messages were read, False
# otherwise.
# <p>
# Stability: Added in v1.07, stability guaranteed for v1.xx.
def isready(self):
return self.c.isready()
##
# Return the server_version as reported from the connected server.
# Raises InterfaceError if no version has been reported from the server.
def server_version(self):
return self.c.server_version()
def encoding(self, encoding=None):
"Returns the client_encoding as reported from the connected server"
return self.c.encoding() | Python |
# vim: sw=4:expandtab:foldmethod=marker
#
# Copyright (c) 2007-2009, Mathieu Fenniak
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
__author__ = "Mathieu Fenniak"
class Warning(StandardError):
pass
class Error(StandardError):
pass
class InterfaceError(Error):
pass
class ConnectionClosedError(InterfaceError):
def __init__(self):
InterfaceError.__init__(self, "connection is closed")
class CursorClosedError(InterfaceError):
def __init__(self):
InterfaceError.__init__(self, "cursor is closed")
class DatabaseError(Error):
pass
class DataError(DatabaseError):
pass
class OperationalError(DatabaseError):
pass
class IntegrityError(DatabaseError):
pass
class InternalError(DatabaseError):
pass
class ProgrammingError(DatabaseError):
pass
class NotSupportedError(DatabaseError):
pass
##
# An exception that is thrown when an internal error occurs trying to
# decode binary array data from the server.
class ArrayDataParseError(InternalError):
pass
##
# Thrown when attempting to transmit an array of unsupported data types.
class ArrayContentNotSupportedError(NotSupportedError):
pass
##
# Thrown when attempting to send an array that doesn't contain all the same
# type of objects (eg. some floats, some ints).
class ArrayContentNotHomogenousError(ProgrammingError):
pass
##
# Attempted to pass an empty array in, but it's not possible to determine the
# data type for an empty array.
class ArrayContentEmptyError(ProgrammingError):
pass
##
# Attempted to use a multidimensional array with inconsistent array sizes.
class ArrayDimensionsNotConsistentError(ProgrammingError):
pass
# A cursor's copy_to or copy_from argument was not provided a table or query
# to operate on.
class CopyQueryOrTableRequiredError(ProgrammingError):
pass
# Raised if a COPY query is executed without using copy_to or copy_from
# functions to provide a data stream.
class CopyQueryWithoutStreamError(ProgrammingError):
pass
# When query parameters don't match up with query args.
class QueryParameterIndexError(ProgrammingError):
pass
# Some sort of parse error occurred during query parameterization.
class QueryParameterParseError(ProgrammingError):
pass
| Python |
def autoretry_datastore_timeouts(attempts=5.0, interval=0.1, exponent=2.0):
"""
Copyright (C) 2009 twitter.com/rcb
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
======================================================================
This function wraps the AppEngine Datastore API to autoretry
datastore timeouts at the lowest accessible level.
The benefits of this approach are:
1. Small Footprint: Does not monkey with Model internals
which may break in future releases.
2. Max Performance: Retrying at this lowest level means
serialization and key formatting is not
needlessly repeated on each retry.
At initialization time, execute this:
>>> autoretry_datastore_timeouts()
Should only be called once, subsequent calls have no effect.
>>> autoretry_datastore_timeouts() # no effect
Default (5) attempts: .1, .2, .4, .8, 1.6 seconds
Parameters can each be specified as floats.
:param attempts: maximum number of times to retry.
:param interval: base seconds to sleep between retries.
:param exponent: rate of exponential back-off.
"""
import time
import logging
from google.appengine.api import apiproxy_stub_map
from google.appengine.runtime import apiproxy_errors
from google.appengine.datastore import datastore_pb
attempts = float(attempts)
interval = float(interval)
exponent = float(exponent)
wrapped = apiproxy_stub_map.MakeSyncCall
errors = {datastore_pb.Error.TIMEOUT: 'Timeout',
datastore_pb.Error.CONCURRENT_TRANSACTION: 'TransactionFailedError'}
def wrapper(*args, **kwargs):
count = 0.0
while True:
try:
return wrapped(*args, **kwargs)
except apiproxy_errors.ApplicationError, err:
errno = err.application_error
if errno not in errors:
raise
sleep = (exponent ** count) * interval
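# with the default parameters this yields 0.1, 0.2, 0.4, 0.8, 1.6 second waits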
count += 1.0
if count > attempts:
raise
msg = "Datastore %s: retry #%d in %s seconds.\n%s"
vals = ''
if count == 1.0:
vals = '\n'.join([str(a) for a in args])
logging.warning(msg % (errors[errno], count, sleep, vals))
time.sleep(sleep)
setattr(wrapper, '_autoretry_datastore_timeouts', False)
if getattr(wrapped, '_autoretry_datastore_timeouts', True):
apiproxy_stub_map.MakeSyncCall = wrapper
| Python |
from types import StringType, ListType, TupleType
from copy import deepcopy
from Elements import *
DEFAULT_TAB_WIDTH = 720
ParagraphAlignmentMap = { ParagraphPropertySet.LEFT : 'ql',
ParagraphPropertySet.RIGHT : 'qr',
ParagraphPropertySet.CENTER : 'qc',
ParagraphPropertySet.JUSTIFY : 'qj',
ParagraphPropertySet.DISTRIBUTE : 'qd' }
TabAlignmentMap = { TabPropertySet.LEFT : '',
TabPropertySet.RIGHT : 'tqr',
TabPropertySet.CENTER : 'tqc',
TabPropertySet.DECIMAL : 'tqdec' }
TableAlignmentMap = { Table.LEFT : 'trql',
Table.RIGHT : 'trqr',
Table.CENTER : 'trqc' }
CellAlignmentMap = { Cell.ALIGN_TOP : '', # clvertalt
Cell.ALIGN_CENTER : 'clvertalc',
Cell.ALIGN_BOTTOM : 'clvertalb' }
CellFlowMap = { Cell.FLOW_LR_TB : '', # cltxlrtb, Text in a cell flows from left to right and top to bottom (default)
Cell.FLOW_RL_TB : 'cltxtbrl', # Text in a cell flows right to left and top to bottom
Cell.FLOW_LR_BT : 'cltxbtlr', # Text in a cell flows left to right and bottom to top
Cell.FLOW_VERTICAL_LR_TB : 'cltxlrtbv', # Text in a cell flows left to right and top to bottom, vertical
Cell.FLOW_VERTICAL_TB_RL : 'cltxtbrlv' } # Text in a cell flows top to bottom and right to left, vertical
ShadingPatternMap = { ShadingPropertySet.HORIZONTAL : 'bghoriz',
ShadingPropertySet.VERTICAL : 'bgvert',
ShadingPropertySet.FORWARD_DIAGONAL : 'bgfdiag',
ShadingPropertySet.BACKWARD_DIAGONAL : 'bgbdiag',
ShadingPropertySet.VERTICAL_CROSS : 'bgcross',
ShadingPropertySet.DIAGONAL_CROSS : 'bgdcross',
ShadingPropertySet.DARK_HORIZONTAL : 'bgdkhoriz',
ShadingPropertySet.DARK_VERTICAL : 'bgdkvert',
ShadingPropertySet.DARK_FORWARD_DIAGONAL : 'bgdkfdiag',
ShadingPropertySet.DARK_BACKWARD_DIAGONAL : 'bgdkbdiag',
ShadingPropertySet.DARK_VERTICAL_CROSS : 'bgdkcross',
ShadingPropertySet.DARK_DIAGONAL_CROSS : 'bgdkdcross' }
TabLeaderMap = { TabPropertySet.DOTS : 'tldot',
TabPropertySet.HYPHENS : 'tlhyph',
TabPropertySet.UNDERLINE : 'tlul',
TabPropertySet.THICK_LINE : 'tlth',
TabPropertySet.EQUAL_SIGN : 'tleq' }
BorderStyleMap = { BorderPropertySet.SINGLE : 'brdrs',
BorderPropertySet.DOUBLE : 'brdrth',
BorderPropertySet.SHADOWED : 'brdrsh',
BorderPropertySet.DOUBLED : 'brdrdb',
BorderPropertySet.DOTTED : 'brdrdot',
BorderPropertySet.DASHED : 'brdrdash',
BorderPropertySet.HAIRLINE : 'brdrhair' }
SectionBreakTypeMap = { Section.NONE : 'sbknone',
Section.COLUMN : 'sbkcol',
Section.PAGE : 'sbkpage',
Section.EVEN : 'sbkeven',
Section.ODD : 'sbkodd' }
class Settings( list ) :
def __init__( self ) :
super( Settings, self ).__init__()
self._append = super( Settings, self ).append
def append( self, value, mask=None, fallback=None ) :
if (value is not 0) and value in [ False, None, '' ] :
if fallback : self._append( fallback )
else :
if mask :
if value is True :
value = mask
else :
value = mask % value
self._append( value )
def Join( self ) :
if self : return r'\%s' % '\\'.join( self )
return ''
def __repr__( self ) :
return self.Join()
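# Illustrative behaviour of Settings.append (a sketch, not part of the module):
#   s = Settings()
#   s.append( 240, 'sb%s' )  # appends 'sb240'
#   s.append( True, 'b' )    # True with a mask appends the bare mask: 'b'
#   s.append( 0, 'fi%s' )    # zero is a real value, so 'fi0' is appended
#   s.append( None, 'i' )    # falsy values without a fallback are skipped
#   s.Join()                 # -> '\sb240\b\fi0'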
class Renderer :
def __init__( self, write_custom_element_callback=None ) :
self.character_style_map = {}
self.paragraph_style_map = {}
self.WriteCustomElement = write_custom_element_callback
#
# All of the Rend* Functions populate a Settings object with values
#
def _RendPageProperties( self, section, settings, in_section ) :
# this one is different from the others as it takes its settings from a section rather than a property set
if in_section :
#paper_size_code = 'psz%s'
paper_width_code = 'pgwsxn%s'
paper_height_code = 'pghsxn%s'
landscape = 'lndscpsxn'
margin_suffix = 'sxn'
else :
#paper_size_code = 'psz%s'
paper_width_code = 'paperw%s'
paper_height_code = 'paperh%s'
landscape = 'landscape'
margin_suffix = ''
#settings.append( section.Paper.Code, paper_size_code )
settings.append( section.Paper.Width, paper_width_code )
settings.append( section.Paper.Height, paper_height_code )
if section.Landscape :
settings.append( landscape )
if section.FirstPageNumber :
settings.append( section.FirstPageNumber, 'pgnstarts%s' )
settings.append( 'pgnrestart' )
self._RendMarginsPropertySet( section.Margins, settings, margin_suffix )
def _RendShadingPropertySet( self, shading_props, settings, prefix='' ) :
if not shading_props : return
settings.append( shading_props.Shading, prefix + 'shading%s' )
settings.append( ShadingPatternMap.get( shading_props.Pattern, False ) )
settings.append( self._colour_map.get( shading_props.Foreground, False ), prefix + 'cfpat%s' )
settings.append( self._colour_map.get( shading_props.Background, False ), prefix + 'cbpat%s' )
def _RendBorderPropertySet( self, edge_props, settings ) :
settings.append( BorderStyleMap[ edge_props.Style ] )
settings.append( edge_props.Width , 'brdrw%s' )
settings.append( self._colour_map.get( edge_props.Colour, False ), 'brdrcf%s' )
settings.append( edge_props.Spacing or False , 'brsp%s' )
def _RendFramePropertySet( self, frame_props, settings, tag_prefix='' ) :
if not frame_props : return
if frame_props.Top :
settings.append( tag_prefix + 'brdrt' )
self._RendBorderPropertySet( frame_props.Top, settings )
if frame_props.Left :
settings.append( tag_prefix + 'brdrl' )
self._RendBorderPropertySet( frame_props.Left, settings )
if frame_props.Bottom :
settings.append( tag_prefix + 'brdrb' )
self._RendBorderPropertySet( frame_props.Bottom, settings )
if frame_props.Right :
settings.append( tag_prefix + 'brdrr' )
self._RendBorderPropertySet( frame_props.Right, settings )
def _RendMarginsPropertySet( self, margin_props, settings, suffix='' ) :
if not margin_props : return
settings.append( margin_props.Top, 'margt' + suffix + '%s' )
settings.append( margin_props.Left, 'margl' + suffix + '%s' )
settings.append( margin_props.Bottom, 'margb' + suffix + '%s' )
settings.append( margin_props.Right, 'margr' + suffix + '%s' )
def _RendParagraphPropertySet( self, paragraph_props, settings ) :
if not paragraph_props : return
settings.append( ParagraphAlignmentMap[ paragraph_props.Alignment ] )
settings.append( paragraph_props.SpaceBefore, 'sb%s' )
settings.append( paragraph_props.SpaceAfter, 'sa%s' )
# then we have to find out all of the tabs
width = 0
for tab in paragraph_props.Tabs :
settings.append( TabAlignmentMap[ tab.Alignment ] )
settings.append( TabLeaderMap.get( tab.Leader, '' ) )
width += tab.Width or DEFAULT_TAB_WIDTH
settings.append( 'tx%s' % width )
settings.append( paragraph_props.PageBreakBefore, 'pagebb' )
settings.append( paragraph_props.FirstLineIndent, 'fi%s' )
settings.append( paragraph_props.LeftIndent, 'li%s' )
settings.append( paragraph_props.RightIndent, 'ri%s' )
if paragraph_props.SpaceBetweenLines :
if paragraph_props.SpaceBetweenLines < 0 :
settings.append( paragraph_props.SpaceBetweenLines, r'sl%s\slmult0' )
else :
settings.append( paragraph_props.SpaceBetweenLines, r'sl%s\slmult1' )
def _RendTextPropertySet( self, text_props, settings ) :
if not text_props : return
if text_props.Expansion :
settings.append( text_props.Expansion, 'expndtw%s' )
settings.append( text_props.Bold, 'b' )
settings.append( text_props.Italic, 'i' )
settings.append( text_props.Underline, 'ul' )
settings.append( text_props.DottedUnderline, 'uld' )
settings.append( text_props.DoubleUnderline, 'uldb' )
settings.append( text_props.WordUnderline, 'ulw' )
settings.append( self._font_map.get( text_props.Font, False ), 'f%s' )
settings.append( text_props.Size, 'fs%s' )
settings.append( self._colour_map.get( text_props.Colour, False ), 'cf%s' )
if text_props.Frame :
frame = text_props.Frame
settings.append( 'chbrdr' )
settings.append( BorderStyleMap[ frame.Style ] )
settings.append( frame.Width , 'brdrw%s' )
settings.append( self._colour_map.get( frame.Colour, False ), 'brdrcf%s' )
#
# All of the Write* functions will write to the internal file object
#
# the _ ones probably don't need to be used by anybody outside
# but the other ones like WriteTextElement could be used in the Custom
# callback.
def Write( self, document, fout ) :
# write all of the standard stuff based upon the first document
self._doc = document
self._fout = fout
self._WriteDocument ()
self._WriteColours ()
self._WriteFonts ()
self._WriteStyleSheet()
settings = Settings()
self._RendPageProperties( self._doc.Sections[ 0 ], settings, in_section=False )
self._write( repr( settings ) )
# handle the simplest case first, we don't need to do any more mucking around
# with section headers, etc we can just rip the document out
if len( document.Sections ) == 1 :
self._WriteSection( document.Sections[ 0 ],
is_first = True,
add_header = False )
else :
for section_idx, section in enumerate( document.Sections ) :
is_first = section_idx == 0
add_header = True
self._WriteSection( section, is_first, add_header )
self._write( '}' )
del self._fout, self._doc, self._CurrentStyle
def _write( self, data, *params ) :
#----------------------------------
# begin modification
# by Herbert Weinhandl
# to convert accented characters
# to their rtf-compatible form
#for c in range( 128, 256 ) :
# data = data.replace( chr(c), "\'%x" % c)
# end modification
#
# This isn't the right place for this as it is going to do
# this loop for all sorts of writes, including settings, control codes, etc.
#
# I will create a def _WriteText (or something) method that is used when the
# actual string that is to be viewed in the document is written, this can then
# do the final accented character check.
#
# I left it here so that I remember to do the right thing when I have time
#----------------------------------
if params : data = data % params
self._fout.write( data )
def _WriteDocument( self ) :
settings = Settings()
assert Languages.IsValid ( self._doc.DefaultLanguage )
assert ViewKind.IsValid ( self._doc.ViewKind )
assert ViewZoomKind.IsValid( self._doc.ViewZoomKind )
assert ViewScale.IsValid ( self._doc.ViewScale )
settings.append( self._doc.DefaultLanguage, 'deflang%s' )
settings.append( self._doc.ViewKind , 'viewkind%s' )
settings.append( self._doc.ViewZoomKind , 'viewzk%s' )
settings.append( self._doc.ViewScale , 'viewscale%s' )
self._write( "{\\rtf1\\ansi\\ansicpg1252\\deff0%s\n" % settings )
def _WriteColours( self ) :
self._write( r"{\colortbl ;" )
self._colour_map = {}
offset = 0
for colour in self._doc.StyleSheet.Colours :
self._write( r'\red%s\green%s\blue%s;', colour.Red, colour.Green, colour.Blue )
self._colour_map[ colour ] = offset + 1
offset += 1
self._write( "}\n" )
def _WriteFonts( self ) :
self._write( r'{\fonttbl' )
self._font_map = {}
offset = 0
for font in self._doc.StyleSheet.Fonts :
pitch = ''
panose = ''
alternate = ''
if font.Pitch : pitch = r'\fprq%s' % font.Pitch
if font.Panose : panose = r'{\*\panose %s}' % font.Panose
if font.Alternate : alternate = r'{\*\falt %s}' % font.Alternate.Name
self._write( r'{\f%s\f%s%s\fcharset%s%s %s%s;}',
offset,
font.Family,
pitch,
font.CharacterSet,
panose,
font.Name,
alternate )
self._font_map[ font ] = offset
offset += 1
self._write( "}\n" )
def _WriteStyleSheet( self ) :
self._write( r"{\stylesheet" )
# TO DO: character styles, does anybody actually use them?
offset_map = {}
for idx, style in enumerate( self._doc.StyleSheet.ParagraphStyles ) :
offset_map[ style ] = idx
# paragraph styles
self.paragraph_style_map = {}
for idx, style in enumerate( self._doc.StyleSheet.ParagraphStyles ) :
if idx == 0 :
default = style
else :
self._write( '\n' )
settings = Settings()
# paragraph properties
self._RendParagraphPropertySet( style.ParagraphPropertySet, settings )
self._RendFramePropertySet ( style.FramePropertySet, settings )
self._RendShadingPropertySet ( style.ShadingPropertySet, settings )
# text properties
self._RendTextPropertySet ( style.TextStyle.TextPropertySet, settings )
self._RendShadingPropertySet( style.TextStyle.ShadingPropertySet, settings )
# have to take the based-on and next styles into account
based_on = '\\sbasedon%s' % offset_map.get( style.BasedOn, 0 )
next = '\\snext%s' % offset_map.get( style.Next, 0 )
inln = '\\s%s%s' % ( idx, settings )
self._write( "{%s%s%s %s;}", inln, based_on, next, style.Name )
self.paragraph_style_map[ style ] = inln
# if no style is specified for the first paragraph to be written, this one
# will be used
self._CurrentStyle = self.paragraph_style_map[ default ]
self._write( "}\n" )
def _WriteSection( self, section, is_first, add_header ) :
def WriteHF( hf, rtfword ) :
#if not hf : return
# if we don't have anything in the header/footer then include
# a blank paragraph, this stops it from picking up the header/footer
# from the previous section
# if not hf : hf = [ Paragraph( '' ) ]
if not hf : hf = []
self._write( '{\\%s' % rtfword )
self._WriteElements( hf )
self._write( '}\n' )
settings = Settings()
if not is_first :
# we need to finish off the preceding section
# and reset all of our defaults back to standard
settings.append( 'sect' )
# reset to our defaults
settings.append( 'sectd' )
if add_header :
settings.append( SectionBreakTypeMap[ section.BreakType ] )
self._RendPageProperties( section, settings, in_section=True )
settings.append( section.HeaderY, 'headery%s' )
settings.append( section.FooterY, 'footery%s' )
# write all of these out now as we need to do a write elements in the
# next section
self._write( repr( settings ) )
# finally after all that has settled down we can do the
# headers and footers
if section.FirstHeader or section.FirstFooter :
# include the titlepg flag if the first page has a special format
self._write( r'\titlepg' )
WriteHF( section.FirstHeader, 'headerf' )
WriteHF( section.FirstFooter, 'footerf' )
WriteHF( section.Header, 'header' )
WriteHF( section.Footer, 'footer' )
# and at last the contents of the section that actually appear on the page
self._WriteElements( section )
def _WriteElements( self, elements ) :
new_line = ''
for element in elements :
self._write( new_line )
new_line = '\n'
clss = element.__class__
if clss == Paragraph :
self.WriteParagraphElement( element )
elif clss == Table :
self.WriteTableElement( element )
elif clss == StringType :
self.WriteParagraphElement( Paragraph( element ) )
elif clss in [ RawCode, Image ] :
self.WriteRawCode( element )
#elif clss == List :
# self._HandleListElement( element )
elif self.WriteCustomElement :
self.WriteCustomElement( self, element )
else :
raise Exception( "Don't know how to handle elements of type %s" % clss )
def WriteParagraphElement( self, paragraph_elem, tag_prefix='', tag_suffix=r'\par', opening='{', closing='}' ) :
# the tag_prefix and the tag_suffix take care of paragraphs in tables. A
# paragraph in a table requires an extra tag at the front (intbl) and we
# don't want the ending tag every time. We want it for all paragraphs but
# the last.
overrides = Settings()
self._RendParagraphPropertySet( paragraph_elem.Properties, overrides )
self._RendFramePropertySet ( paragraph_elem.Frame, overrides )
self._RendShadingPropertySet ( paragraph_elem.Shading, overrides )
# when writing the RTF the style is carried from the previous paragraph to the next,
# so if the currently written paragraph has a style then make it the current one,
# otherwise leave it as it was
self._CurrentStyle = self.paragraph_style_map.get( paragraph_elem.Style, self._CurrentStyle )
self._write( r'%s\pard\plain%s %s%s ' % ( opening, tag_prefix, self._CurrentStyle, overrides ) )
for element in paragraph_elem :
if isinstance( element, StringType ) :
self._write( element )
elif isinstance( element, RawCode ) :
self._write( element.Data )
elif isinstance( element, Text ) :
self.WriteTextElement( element )
elif isinstance( element, Inline ) :
self.WriteInlineElement( element )
elif element == TAB :
self._write( r'\tab ' )
elif element == LINE :
self._write( r'\line ' )
elif self.WriteCustomElement :
self.WriteCustomElement( self, element )
else :
raise Exception( 'Don\'t know how to handle %s' % element )
self._write( tag_suffix + closing )
def WriteRawCode( self, raw_elem ) :
self._write( raw_elem.Data )
def WriteTextElement( self, text_elem ) :
overrides = Settings()
self._RendTextPropertySet ( text_elem.Properties, overrides )
self._RendShadingPropertySet( text_elem.Shading, overrides, 'ch' )
# write the wrapper and then let the custom handler have a go
if overrides : self._write( '{%s ' % repr( overrides ) )
# if the data is just a string then we can now write it
if isinstance( text_elem.Data, StringType ) :
self._write( text_elem.Data or '' )
elif text_elem.Data == TAB :
self._write( r'\tab ' )
else :
self.WriteCustomElement( self, text_elem.Data )
if overrides : self._write( '}' )
def WriteInlineElement( self, inline_elem ) :
overrides = Settings()
self._RendTextPropertySet ( inline_elem.Properties, overrides )
self._RendShadingPropertySet( inline_elem.Shading, overrides, 'ch' )
# write the wrapper and then let the custom handler have a go
if overrides : self._write( '{%s ' % repr( overrides ) )
for element in inline_elem :
# if the data is just a string then we can now write it
if isinstance( element, StringType ) :
self._write( element )
elif isinstance( element, RawCode ) :
self._write( element.Data )
elif element == TAB :
self._write( r'\tab ' )
elif element == LINE :
self._write( r'\line ' )
else :
self.WriteCustomElement( self, element )
if overrides : self._write( '}' )
def WriteText( self, text ) :
self._write( text or '' )
def WriteTableElement( self, table_elem ) :
vmerge = [ False ] * table_elem.ColumnCount
for height, cells in table_elem.Rows :
# calculate the right hand edge of the cells taking into account the spans
offset = table_elem.LeftOffset or 0
cellx = []
cell_idx = 0
for cell in cells :
cellx.append( offset + sum( table_elem.ColumnWidths[ : cell_idx + cell.Span ] ) )
cell_idx += cell.Span
self._write( r'{\trowd' )
settings = Settings()
# the spec says that this value is mandatory and I think that 108 is the default value
# so I'll take care of it here
settings.append( table_elem.GapBetweenCells or 108, 'trgaph%s' )
settings.append( TableAlignmentMap[ table_elem.Alignment ] )
settings.append( height, 'trrh%s' )
settings.append( table_elem.LeftOffset, 'trleft%s' )
width = table_elem.LeftOffset or 0
for idx, cell in enumerate( cells ) :
self._RendFramePropertySet ( cell.Frame, settings, 'cl' )
# cells don't have margins so I don't know why I was doing this
# I think it might have an effect in some versions of some WPs.
#self._RendMarginsPropertySet( cell.Margins, settings, 'cl' )
# if we are starting to merge or if this one is the first in what is
# probably a series of merges then start the vertical merging
if cell.StartVerticalMerge or (cell.VerticalMerge and not vmerge[ idx ]) :
settings.append( 'clvmgf' )
vmerge[ idx ] = True
elif cell.VerticalMerge :
#..continuing a merge
settings.append( 'clvmrg' )
else :
#..no merging going on so make sure that it is off
vmerge[ idx ] = False
# for any cell in the next row that is covered by this span we
# need to run off the vertical merging as we don't want them
# merging up into this spanned cell
for vmerge_idx in range( idx + 1, idx + cell.Span - 1 ) :
vmerge[ vmerge_idx ] = False
settings.append( CellAlignmentMap[ cell.Alignment ] )
settings.append( CellFlowMap[ cell.Flow ] )
# this terminates the definition of a cell and represents the right most edge of the cell from the left margin
settings.append( cellx[ idx ], 'cellx%s' )
self._write( repr( settings ) )
for cell in cells :
if len( cell ) :
last_idx = len( cell ) - 1
for element_idx, element in enumerate( cell ) :
# wrap plain strings in paragraph tags
if isinstance( element, StringType ) :
element = Paragraph( element )
# don't forget the prefix or else Word crashes and does all sorts of strange things
if element_idx == last_idx :
self.WriteParagraphElement( element, tag_prefix=r'\intbl', tag_suffix='', opening='', closing='' )
else :
self.WriteParagraphElement( element, tag_prefix=r'\intbl', opening='', closing='' )
self._write( r'\cell' )
else :
self._write( r'\pard\intbl\cell' )
self._write( '\\row}\n' )
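# Typical use of the Renderer (a sketch; assumes a PyRTF Document `doc` built
# with the Elements module):
#   renderer = Renderer()
#   fout = file( 'output.rtf', 'w' )
#   renderer.Write( doc, fout )
#   fout.close()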
| Python |
from types import IntType, FloatType, LongType, StringTypes
from copy import deepcopy
from binascii import hexlify
from Constants import *
from Styles import *
class UnhandledParamError( Exception ) :
def __init__( self, param ) :
Exception.__init__( self, "Don't know what to do with param %s" % param )
# red green blue
StandardColours = Colours()
StandardColours.append( Colour( 'Black', 0, 0, 0 ) )
StandardColours.append( Colour( 'Blue', 0, 0, 255 ) )
StandardColours.append( Colour( 'Turquoise', 0, 255, 255 ) )
StandardColours.append( Colour( 'Green', 0, 255, 0 ) )
StandardColours.append( Colour( 'Pink', 255, 0, 255 ) )
StandardColours.append( Colour( 'Red', 255, 0, 0 ) )
StandardColours.append( Colour( 'Yellow', 255, 255, 0 ) )
StandardColours.append( Colour( 'White', 255, 255, 255 ) )
StandardColours.append( Colour( 'Blue Dark', 0, 0, 128 ) )
StandardColours.append( Colour( 'Teal', 0, 128, 128 ) )
StandardColours.append( Colour( 'Green Dark', 0, 128, 0 ) )
StandardColours.append( Colour( 'Violet', 128, 0, 128 ) )
StandardColours.append( Colour( 'Red Dark', 128, 0, 0 ) )
StandardColours.append( Colour( 'Yellow Dark', 128, 128, 0 ) )
StandardColours.append( Colour( 'Grey Dark', 128, 128, 128 ) )
StandardColours.append( Colour( 'Grey', 192, 192, 192 ) )
StandardFonts = Fonts()
StandardFonts.append( Font( 'Arial' , 'swiss' , 0, 2, '020b0604020202020204' ) )
StandardFonts.append( Font( 'Arial Black' , 'swiss' , 0, 2, '020b0a04020102020204' ) )
StandardFonts.append( Font( 'Arial Narrow' , 'swiss' , 0, 2, '020b0506020202030204' ) )
StandardFonts.append( Font( 'Bitstream Vera Sans Mono', 'modern', 0, 1, '020b0609030804020204' ) )
StandardFonts.append( Font( 'Bitstream Vera Sans' , 'swiss' , 0, 2, '020b0603030804020204' ) )
StandardFonts.append( Font( 'Bitstream Vera Serif' , 'roman' , 0, 2, '02060603050605020204' ) )
StandardFonts.append( Font( 'Book Antiqua' , 'roman' , 0, 2, '02040602050305030304' ) )
StandardFonts.append( Font( 'Bookman Old Style' , 'roman' , 0, 2, '02050604050505020204' ) )
StandardFonts.append( Font( 'Castellar' , 'roman' , 0, 2, '020a0402060406010301' ) )
StandardFonts.append( Font( 'Century Gothic' , 'swiss' , 0, 2, '020b0502020202020204' ) )
StandardFonts.append( Font( 'Comic Sans MS' , 'script', 0, 2, '030f0702030302020204' ) )
StandardFonts.append( Font( 'Courier New' , 'modern', 0, 1, '02070309020205020404' ) )
StandardFonts.append( Font( 'Franklin Gothic Medium' , 'swiss' , 0, 2, '020b0603020102020204' ) )
StandardFonts.append( Font( 'Garamond' , 'roman' , 0, 2, '02020404030301010803' ) )
StandardFonts.append( Font( 'Georgia' , 'roman' , 0, 2, '02040502050405020303' ) )
StandardFonts.append( Font( 'Haettenschweiler' , 'swiss' , 0, 2, '020b0706040902060204' ) )
StandardFonts.append( Font( 'Impact' , 'swiss' , 0, 2, '020b0806030902050204' ) )
StandardFonts.append( Font( 'Lucida Console' , 'modern', 0, 1, '020b0609040504020204' ) )
StandardFonts.append( Font( 'Lucida Sans Unicode' , 'swiss' , 0, 2, '020b0602030504020204' ) )
StandardFonts.append( Font( 'Microsoft Sans Serif' , 'swiss' , 0, 2, '020b0604020202020204' ) )
StandardFonts.append( Font( 'Monotype Corsiva' , 'script', 0, 2, '03010101010201010101' ) )
StandardFonts.append( Font( 'Palatino Linotype' , 'roman' , 0, 2, '02040502050505030304' ) )
StandardFonts.append( Font( 'Papyrus' , 'script', 0, 2, '03070502060502030205' ) )
StandardFonts.append( Font( 'Sylfaen' , 'roman' , 0, 2, '010a0502050306030303' ) )
StandardFonts.append( Font( 'Symbol' , 'roman' , 2, 2, '05050102010706020507' ) )
StandardFonts.append( Font( 'Tahoma' , 'swiss' , 0, 2, '020b0604030504040204' ) )
StandardFonts.append( Font( 'Times New Roman' , 'roman' , 0, 2, '02020603050405020304' ) )
StandardFonts.append( Font( 'Trebuchet MS' , 'swiss' , 0, 2, '020b0603020202020204' ) )
StandardFonts.append( Font( 'Verdana' , 'swiss' , 0, 2, '020b0604030504040204' ) )
StandardFonts.Castellar.SetAlternate( StandardFonts.Georgia )
"""
Found the following definition at http://www.pbdr.com/vbtips/gen/convtwip.htm
Twips are screen-independent units used to ensure that the placement and
proportion of screen elements in your screen application are the same on all
display systems. A twip is a unit of screen measurement equal to 1/20 of a
printer's point. The conversion between twips and
inches/centimeters/millimeters is as follows:
There are approximately 1440 twips to an inch (the length of a screen item
measuring one inch when printed).
As there are 2.54 centimeters to 1 inch, then there are approximately 567
twips to a centimeter (the length of a screen item measuring one centimeter
when printed).
Or in millimeters, as there are 25.4 millimeters to 1 inch, therefore there
are approximately 56.7 twips to a millimeter (the length of a screen item
measuring one millimeter when printed)."""
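# Helper sketch based on the conversion factors quoted above; these names are
# illustrative additions, not part of the original module.
def TwipsFromInches( value ) : return int( value * 1440 )
def TwipsFromCm( value ) : return int( value * 567 )
def TwipsFromMm( value ) : return int( value * 56.7 )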
# Width default is 12240, Height default is 15840
StandardPaper = Papers()
StandardPaper.append( Paper( 'LETTER' , 1, 'Letter 8 1/2 x 11 in' , 12240, 15840 ) )
StandardPaper.append( Paper( 'LETTERSMALL' , 2, 'Letter Small 8 1/2 x 11 in' , 12240, 15840 ) )
StandardPaper.append( Paper( 'TABLOID' , 3, 'Tabloid 11 x 17 in' , 15840, 24480 ) )
StandardPaper.append( Paper( 'LEDGER' , 4, 'Ledger 17 x 11 in' , 24480, 15840 ) )
StandardPaper.append( Paper( 'LEGAL' , 5, 'Legal 8 1/2 x 14 in' , 12240, 20160 ) )
StandardPaper.append( Paper( 'STATEMENT' , 6, 'Statement 5 1/2 x 8 1/2 in' , 7920, 12240 ) )
StandardPaper.append( Paper( 'EXECUTIVE' , 7, 'Executive 7 1/4 x 10 1/2 in' , 10440, 15120 ) )
StandardPaper.append( Paper( 'A3' , 8, 'A3 297 x 420 mm' , 16838, 23811 ) )
StandardPaper.append( Paper( 'A4' , 9, 'A4 210 x 297 mm' , 11907, 16838 ) )
StandardPaper.append( Paper( 'A4SMALL' , 10, 'A4 Small 210 x 297 mm' , 11907, 16838 ) )
StandardPaper.append( Paper( 'A5' , 11, 'A5 148 x 210 mm' , 8391, 11907 ) )
StandardPaper.append( Paper( 'B4' , 12, 'B4 (JIS) 250 x 354' , 14175, 20072 ) )
StandardPaper.append( Paper( 'B5' , 13, 'B5 (JIS) 182 x 257 mm' , 10319, 14572 ) )
StandardPaper.append( Paper( 'FOLIO' , 14, 'Folio 8 1/2 x 13 in' , 12240, 18720 ) )
StandardPaper.append( Paper( 'QUARTO' , 15, 'Quarto 215 x 275 mm' , 12191, 15593 ) )
StandardPaper.append( Paper( '10X14' , 16, '10x14 in' , 14400, 20160 ) )
StandardPaper.append( Paper( '11X17' , 17, '11x17 in' , 15840, 24480 ) )
StandardPaper.append( Paper( 'NOTE' , 18, 'Note 8 1/2 x 11 in' , 12240, 15840 ) )
StandardPaper.append( Paper( 'ENV_9' , 19, 'Envelope #9 3 7/8 x 8 7/8' , 5580, 12780 ) )
StandardPaper.append( Paper( 'ENV_10' , 20, 'Envelope #10 4 1/8 x 9 1/2' , 5940, 13680 ) )
StandardPaper.append( Paper( 'ENV_11' , 21, 'Envelope #11 4 1/2 x 10 3/8' , 6480, 14940 ) )
StandardPaper.append( Paper( 'ENV_12' , 22, 'Envelope #12 4 3/4 x 11' , 6840, 15840 ) )
StandardPaper.append( Paper( 'ENV_14' , 23, 'Envelope #14 5 x 11 1/2' , 7200, 16560 ) )
StandardPaper.append( Paper( 'CSHEET' , 24, 'C size sheet 18 x 24 in' , 29520, 34560 ) )
StandardPaper.append( Paper( 'DSHEET' , 25, 'D size sheet 22 x 34 in' , 31680, 48960 ) )
StandardPaper.append( Paper( 'ESHEET' , 26, 'E size sheet 34 x 44 in' , 48960, 63360 ) )
StandardPaper.append( Paper( 'ENV_DL' , 27, 'Envelope DL 110 x 220mm' , 6237, 12474 ) )
StandardPaper.append( Paper( 'ENV_C5' , 28, 'Envelope C5 162 x 229 mm' , 9185, 12984 ) )
StandardPaper.append( Paper( 'ENV_C3' , 29, 'Envelope C3 324 x 458 mm' , 18371, 25969 ) )
StandardPaper.append( Paper( 'ENV_C4' , 30, 'Envelope C4 229 x 324 mm' , 12984, 18371 ) )
StandardPaper.append( Paper( 'ENV_C6' , 31, 'Envelope C6 114 x 162 mm' , 6464, 9185 ) )
StandardPaper.append( Paper( 'ENV_C65' , 32, 'Envelope C65 114 x 229 mm' , 6464, 12984 ) )
StandardPaper.append( Paper( 'ENV_B4' , 33, 'Envelope B4 250 x 353 mm' , 14175, 20015 ) )
StandardPaper.append( Paper( 'ENV_B5' , 34, 'Envelope B5 176 x 250 mm' , 9979, 14175 ) )
StandardPaper.append( Paper( 'ENV_B6' , 35, 'Envelope B6 176 x 125 mm' , 9979, 7088 ) )
StandardPaper.append( Paper( 'ENV_ITALY' , 36, 'Envelope 110 x 230 mm' , 6237, 13041 ) )
StandardPaper.append( Paper( 'ENV_MONARCH' , 37, 'Envelope Monarch 3.875 x 7.5 in' , 5580, 10800 ) )
StandardPaper.append( Paper( 'ENV_PERSONAL' , 38, '6 3/4 Envelope 3 5/8 x 6 1/2 in' , 5220, 9360 ) )
StandardPaper.append( Paper( 'FANFOLD_US' , 39, 'US Std Fanfold 14 7/8 x 11 in' , 21420, 15840 ) )
StandardPaper.append( Paper( 'FANFOLD_STD_GERMAN' , 40, 'German Std Fanfold 8 1/2 x 12 in' , 12240, 17280 ) )
StandardPaper.append( Paper( 'FANFOLD_LGL_GERMAN' , 41, 'German Legal Fanfold 8 1/2 x 13 in' , 12240, 18720 ) )
#
# Finally a StyleSheet in which all of this stuff is put together
#
class StyleSheet :
def __init__( self, colours=None, fonts=None ) :
self.Colours = colours or deepcopy( StandardColours )
self.Fonts = fonts or deepcopy( StandardFonts )
self.TextStyles = AttributedList()
self.ParagraphStyles = AttributedList()
class Section( list ) :
NONE = 1
COLUMN = 2
PAGE = 3
EVEN = 4
ODD = 5
BREAK_TYPES = [ NONE, COLUMN, PAGE, EVEN, ODD ]
def __init__( self, paper=None, margins=None, break_type=None, headery=None, footery=None, landscape=None, first_page_number=None ) :
super( Section, self ).__init__()
self.Paper = paper or StandardPaper.A4
self.SetMargins( margins )
self.Header = []
self.Footer = []
self.FirstHeader = []
self.FirstFooter = []
self.SetBreakType( break_type or self.NONE )
self.SetHeaderY( headery )
self.SetFooterY( footery )
self.SetLandscape( landscape )
self.SetFirstPageNumber( first_page_number )
def TwipsToRightMargin( self ) :
return self.Paper.Width - ( self.Margins.Left + self.Margins.Right )
def SetMargins( self, value ) :
self.Margins = value or MarginsPropertySet( top=1000, left=1200, bottom=1000, right=1200 )
self.Width = self.Paper.Width - ( self.Margins.Left + self.Margins.Right )
def SetBreakType( self, value ) :
assert value in self.BREAK_TYPES
self.BreakType = value
return self
def SetHeaderY( self, value ) :
self.HeaderY = value
return self
def SetFooterY( self, value ) :
self.FooterY = value
return self
def SetLandscape( self, value ) :
self.Landscape = False
if value : self.Landscape = True
return self
def SetFirstPageNumber( self, value ) :
self.FirstPageNumber = value
return self
def MakeDefaultStyleSheet( ) :
result = StyleSheet()
NormalText = TextStyle( TextPropertySet( result.Fonts.Arial, 22 ) )
ps = ParagraphStyle( 'Normal',
NormalText.Copy(),
ParagraphPropertySet( space_before = 60,
space_after = 60 ) )
result.ParagraphStyles.append( ps )
ps = ParagraphStyle( 'Normal Short',
NormalText.Copy() )
result.ParagraphStyles.append( ps )
NormalText.TextPropertySet.SetSize( 32 )
ps = ParagraphStyle( 'Heading 1',
NormalText.Copy(),
ParagraphPropertySet( space_before = 240,
space_after = 60 ) )
result.ParagraphStyles.append( ps )
NormalText.TextPropertySet.SetSize( 24 ).SetBold( True )
ps = ParagraphStyle( 'Heading 2',
NormalText.Copy(),
ParagraphPropertySet( space_before = 240,
space_after = 60 ) )
result.ParagraphStyles.append( ps )
# Add some more in that are based on the normal template but that
# have some indenting set that makes them suitable for doing numbered
normal_numbered = result.ParagraphStyles.Normal.Copy()
normal_numbered.SetName( 'Normal Numbered' )
normal_numbered.ParagraphPropertySet.SetFirstLineIndent( TabPropertySet.DEFAULT_WIDTH * -1 )
normal_numbered.ParagraphPropertySet.SetLeftIndent ( TabPropertySet.DEFAULT_WIDTH )
result.ParagraphStyles.append( normal_numbered )
normal_numbered2 = result.ParagraphStyles.Normal.Copy()
normal_numbered2.SetName( 'Normal Numbered 2' )
normal_numbered2.ParagraphPropertySet.SetFirstLineIndent( TabPropertySet.DEFAULT_WIDTH * -1 )
normal_numbered2.ParagraphPropertySet.SetLeftIndent ( TabPropertySet.DEFAULT_WIDTH * 2 )
result.ParagraphStyles.append( normal_numbered2 )
## LIST STYLES
for idx, indent in [ (1, TabPS.DEFAULT_WIDTH ),
(2, TabPS.DEFAULT_WIDTH * 2),
(3, TabPS.DEFAULT_WIDTH * 3) ] :
ps = ParagraphStyle( 'List %s' % idx,
TextStyle( TextPropertySet( result.Fonts.Arial, 22 ) ),
ParagraphPropertySet( space_before = 60,
space_after = 60,
first_line_indent = -indent,
left_indent = indent) )
result.ParagraphStyles.append( ps )
return result
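# e.g. (a sketch; the Document class is defined elsewhere in this package):
#   ss = MakeDefaultStyleSheet()
#   doc = Document( style_sheet=ss )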
class TAB : pass
class LINE : pass
class RawCode :
def __init__( self, data ) :
self.Data = data
PAGE_NUMBER = RawCode( r'{\field{\fldinst page}}' )
TOTAL_PAGES = RawCode( r'{\field{\fldinst numpages}}' )
SECTION_PAGES = RawCode( r'{\field{\fldinst sectionpages}}' )
ARIAL_BULLET = RawCode( r'{\f2\'95}' )
def _get_jpg_dimensions( fin ):
"""
converted from: http://dev.w3.org/cvsweb/Amaya/libjpeg/rdjpgcom.c?rev=1.2
"""
M_SOF0 = chr( 0xC0 ) # /* Start Of Frame N */
M_SOF1 = chr( 0xC1 ) # /* N indicates which compression process */
M_SOF2 = chr( 0xC2 ) # /* Only SOF0-SOF2 are now in common use */
M_SOF3 = chr( 0xC3 ) #
M_SOF5 = chr( 0xC5 ) # /* NB: codes C4 and CC are NOT SOF markers */
M_SOF6 = chr( 0xC6 ) #
M_SOF7 = chr( 0xC7 ) #
M_SOF9 = chr( 0xC9 ) #
M_SOF10 = chr( 0xCA ) #
M_SOF11 = chr( 0xCB ) #
M_SOF13 = chr( 0xCD ) #
M_SOF14 = chr( 0xCE ) #
M_SOF15 = chr( 0xCF ) #
M_SOI = chr( 0xD8 ) # /* Start Of Image (beginning of datastream) */
M_EOI = chr( 0xD9 ) # /* End Of Image (end of datastream) */
M_FF = chr( 0xFF )
MARKERS = [ M_SOF0, M_SOF1, M_SOF2, M_SOF3,
M_SOF5, M_SOF6, M_SOF7, M_SOF9,
M_SOF10,M_SOF11, M_SOF13, M_SOF14,
M_SOF15 ]
def get_length() :
b1 = fin.read( 1 )
b2 = fin.read( 1 )
return (ord(b1) << 8) + ord(b2)
def next_marker() :
# markers come straight after an 0xFF so skip everything
# up to the first 0xFF that we find
while fin.read(1) != M_FF :
pass
# there can be more than one 0xFF as they can be used
# for padding so we are now looking for the first byte
# that isn't an 0xFF, this will be the marker
while True :
result = fin.read(1)
if result != M_FF :
return result
raise Exception( 'Invalid JPEG' )
# BODY OF THE FUNCTION
if not ((fin.read(1) == M_FF) and (fin.read(1) == M_SOI)) :
raise Exception( 'Invalid Jpeg' )
while True :
marker = next_marker()
# the marker is always followed by two bytes representing the length of the data field
length = get_length ()
if length < 2 : raise Exception( "Erroneous JPEG marker length" )
# if it is a compression process marker then it will contain the dimension of the image
if marker in MARKERS :
# the next byte is the data precision, just skip it
fin.read(1)
# bingo
image_height = get_length()
image_width = get_length()
return image_width, image_height
# just skip whatever data it contains
fin.read( length - 2 )
raise Exception( 'Invalid JPEG, end of stream reached' )
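# A minimal usage sketch (the file name is hypothetical): the parser walks
# the JPEG marker stream until it reaches a Start Of Frame marker.
#
#     fin = open( 'photo.jpg', 'rb' )
#     try :
#         width, height = _get_jpg_dimensions( fin )
#     finally :
#         fin.close()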
_PNG_HEADER = '\x89\x50\x4e\x47'   # first four bytes of the PNG signature
def _get_png_dimensions( data ) :
if data[ 0 : 4 ] != _PNG_HEADER :
raise Exception( 'Invalid PNG image' )
# width and height are big-endian values in the IHDR chunk; reading the
# low two bytes of each is enough for images under 65536 pixels
width = (ord(data[18]) * 256) + (ord(data[19]))
height = (ord(data[22]) * 256) + (ord(data[23]))
return width, height
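# Sketch (hypothetical file name): only the first 100 or so bytes are needed,
# as the IHDR chunk sits right after the 8 byte signature.
#
#     data = open( 'image.png', 'rb' ).read( 100 )
#     width, height = _get_png_dimensions( data )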
def _get_emf_dimensions( fin ):
import struct
def get_DWORD():
return struct.unpack("<L",fin.read(4))[0]
def get_LONG():
return struct.unpack("<l",fin.read(4))[0]
def get_WORD():
return struct.unpack("<H",fin.read(2))[0]
class Empty:
pass
header = Empty()
header.RecordType = get_DWORD() # Record type
header.RecordSize = get_DWORD() # Size of the record in bytes
header.BoundsLeft = get_LONG() # Left inclusive bounds
header.BoundsTop = get_LONG() # Top inclusive bounds
header.BoundsRight = get_LONG() # Right inclusive bounds
header.BoundsBottom = get_LONG() # Bottom inclusive bounds
header.FrameLeft = get_LONG() # Left side of inclusive picture frame
header.FrameTop = get_LONG() # Top side of inclusive picture frame
header.FrameRight = get_LONG() # Right side of inclusive picture frame
header.FrameBottom = get_LONG() # Bottom side of inclusive picture frame
header.Signature = get_DWORD() # Signature ID (always 0x464D4520)
header.Version = get_DWORD() # Version of the metafile
header.Size = get_DWORD() # Size of the metafile in bytes
header.NumOfRecords = get_DWORD() # Number of records in the metafile
header.NumOfHandles = get_WORD() # Number of handles in the handle table
header.Reserved = get_WORD() # Not used (always 0)
header.SizeOfDescrip = get_DWORD() # Size of description string in WORDs
header.OffsOfDescrip = get_DWORD() # Offset of description string in metafile
header.NumPalEntries = get_DWORD() # Number of color palette entries
header.WidthDevPixels = get_LONG() # Width of reference device in pixels
header.HeightDevPixels = get_LONG() # Height of reference device in pixels
header.WidthDevMM = get_LONG() # Width of reference device in millimeters
header.HeightDevMM = get_LONG() # Height of reference device in millimeters
if 0:
klist = header.__dict__.keys()
klist.sort()
for k in klist:
print "%20s:%s" % (k,header.__dict__[k])
dw = header.FrameRight-header.FrameLeft
dh = header.FrameBottom-header.FrameTop
# convert from 0.01mm units to 1/72in units
return int(dw * 72.0/2540.0), int(dh * 72.0/2540.0)
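# Sketch (hypothetical file name): the EMF frame bounds are stored in
# 0.01mm units, so the values returned are already converted to points.
#
#     fin = open( 'drawing.emf', 'rb' )
#     width, height = _get_emf_dimensions( fin )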
class Image( RawCode ) :
# The width and height (in twips) need to be added in explicitly, as
# Word XP crashes without these values. Still working out the most
# efficient way of getting them.
# \picscalex100\picscaley100\piccropl0\piccropr0\piccropt0\piccropb0
# \picwgoal900\pichgoal281
PNG_LIB = 'pngblip'
JPG_LIB = 'jpegblip'
EMF_LIB = 'emfblip'
PICT_TYPES = { 'png' : PNG_LIB,
'jpg' : JPG_LIB,
'emf' : EMF_LIB}
def __init__( self, infile, **kwargs ) :
if hasattr( infile, 'read' ):
fin = infile
if 'datatype' not in kwargs.keys():
msg = "If passing in a file object, you must also specify type='xxx' where xxx is one of %s" % self.PICT_TYPES.keys()
raise ValueError,msg
file_name = kwargs.pop('datatype')
else:
fin = file( infile, 'rb' )
file_name = infile
pict_type = self.PICT_TYPES[ file_name[ -3 : ].lower() ]
if pict_type == self.PNG_LIB :
width, height = _get_png_dimensions( fin.read( 100 ) )
elif pict_type == self.JPG_LIB :
width, height = _get_jpg_dimensions( fin )
elif pict_type == self.EMF_LIB :
width, height = _get_emf_dimensions( fin )
# if user specified height or width but not both, then
# scale unspecified dimension to maintain aspect ratio
if ('width' in kwargs) and ('height' not in kwargs):
height = int(height * float(kwargs['width'])/width)
elif ('height' in kwargs) and ('width' not in kwargs):
width = int(width * float(kwargs['height'])/height)
width = kwargs.pop('width',width)
height = kwargs.pop('height', height)
codes = [ pict_type,
'picwgoal%s' % (width * 20),
'pichgoal%s' % (height * 20) ]
# let user specify global scaling
scale = kwargs.pop('scale',100)
for kwarg, code, default in [ ( 'scale_x', 'scalex', scale ),
( 'scale_y', 'scaley', scale ),
( 'crop_left', 'cropl', '0' ),
( 'crop_right', 'cropr', '0' ),
( 'crop_top', 'cropt', '0' ),
( 'crop_bottom', 'cropb', '0' ) ] :
codes.append( 'pic%s%s' % ( code, kwargs.pop( kwarg, default ) ) )
# reset back to the start of the file to get all of it and now
# turn it into hex.
fin.seek( 0, 0 )
image = hexlify( fin.read() )
fin.close()
data = []
for i in range( 0, len( image ), 128 ) :
data.append( image[ i : i + 128 ] )
data = r'{\pict{\%s}%s}' % ( '\\'.join( codes ), '\n'.join( data ) )
RawCode.__init__( self, data )
def ToRawCode( self, var_name ) :
return '%s = RawCode( """%s""" )' % ( var_name, self.Data )
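# A usage sketch (file names are hypothetical). Passing a file-like object
# requires the 'datatype' keyword; width/height are in printer points, and
# when only one is given the other is scaled to keep the aspect ratio.
#
#     image = Image( 'logo.png' )
#     thumb = Image( open( 'logo.png', 'rb' ), datatype='png', width=100 )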
class Text :
def __init__( self, *params ) :
self.Data = None
self.Style = None
self.Properties = None
self.Shading = None
for param in params :
if isinstance( param, TextStyle ) : self.Style = param
elif isinstance( param, TextPS ) : self.Properties = param
elif isinstance( param, ShadingPS ) : self.Shading = param
else :
# otherwise let the renderer's custom handler sort it out
self.Data = param
def SetData( self, value ) :
self.Data = value
class Inline( list ) :
def __init__( self, *params ) :
super( Inline, self ).__init__()
self.Style = None
self.Properties = None
self.Shading = None
self._append = super( Inline, self ).append
for param in params :
if isinstance( param, TextStyle ) : self.Style = param
elif isinstance( param, TextPS ) : self.Properties = param
elif isinstance( param, ShadingPS ) : self.Shading = param
else :
# otherwise we add it to our list of elements and let
# the renderer's custom handler sort it out.
self.append( param )
def append( self, *params ) :
# filter out any that are explicitly None
[ self._append( param ) for param in params if param is not None ]
class Paragraph( list ) :
def __init__( self, *params ) :
super( Paragraph, self ).__init__()
self.Style = None
self.Properties = None
self.Frame = None
self.Shading = None
self._append = super( Paragraph, self ).append
for param in params :
if isinstance( param, ParagraphStyle ) : self.Style = param
elif isinstance( param, ParagraphPS ) : self.Properties = param
elif isinstance( param, FramePS ) : self.Frame = param
elif isinstance( param, ShadingPS ) : self.Shading = param
else :
# otherwise we add it to our list of elements and let
# the renderer's custom handler sort it out.
self.append( param )
def append( self, *params ) :
# filter out any that are explicitly None
[ self._append( param ) for param in params if param is not None ]
def insert( self, index, value ) :
if value is not None :
super( Paragraph, self ).insert( index, value )
class Table :
LEFT = 1
RIGHT = 2
CENTER = 3
ALIGNMENT = [ LEFT, RIGHT, CENTER ]
NO_WRAPPING = 1
WRAP_AROUND = 2
WRAPPING = [ NO_WRAPPING, WRAP_AROUND ]
# \trrh sets the height of a row: 0 means adjust automatically; use a
# negative value for an absolute height.
# \trgaph is half of the horizontal gap between table cells; reduce it
# to get really narrow columns.
def __init__( self, *column_widths, **kwargs ) :
self.Rows = []
self.SetAlignment ( kwargs.pop( 'alignment', self.LEFT ) )
self.SetLeftOffset ( kwargs.pop( 'left_offset', None ) )
self.SetGapBetweenCells( kwargs.pop( 'gap_between_cells', None ) )
self.SetColumnWidths ( *column_widths )
assert not kwargs, 'invalid keyword args %s' % kwargs
def SetAlignment( self, value ) :
assert value is None or value in self.ALIGNMENT
self.Alignment = value or self.LEFT
return self
def SetLeftOffset( self, value ) :
self.LeftOffset = value
return self
def SetGapBetweenCells( self, value ) :
self.GapBetweenCells = value
return self
def SetColumnWidths( self, *column_widths ) :
self.ColumnWidths = column_widths
self.ColumnCount = len( column_widths )
return self
def AddRow( self, *cells ) :
height = None
if isinstance( cells[ 0 ], (IntType, FloatType, LongType) ):
height = int( cells[ 0 ] )
cells = cells[ 1 : ]
# make sure all of the spans add up to the number of columns
# otherwise the table will get corrupted
if self.ColumnCount != sum( [ cell.Span for cell in cells ] ) :
raise Exception( 'ColumnCount != the total of this row\'s cell.Spans.' )
self.Rows.append( ( height, cells ) )
append = AddRow
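# A construction sketch: column widths are in twips, and every row's
# cell spans must add up to the number of columns.
#
#     table = Table( TabPS.DEFAULT_WIDTH * 3, TabPS.DEFAULT_WIDTH * 3 )
#     table.AddRow( Cell( 'left' ), Cell( 'right' ) )
#     table.AddRow( Cell( 'spans both columns', span=2 ) )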
class Cell( list ) :
"""
\clvertalt Text is top-aligned in cell (the default).
\clvertalc Text is centered vertically in cell.
\clvertalb Text is bottom-aligned in cell.
\cltxlrtb Vertical text aligned left (direction bottom up).
\cltxtbrl Vertical text aligned right (direction top down).
"""
ALIGN_TOP = 1
ALIGN_CENTER = 2
ALIGN_BOTTOM = 3
FLOW_LR_TB = 1
FLOW_RL_TB = 2
FLOW_LR_BT = 3
FLOW_VERTICAL_LR_TB = 4
FLOW_VERTICAL_TB_RL = 5
def __init__( self, *params, **kwargs ) :
super( Cell, self ).__init__()
self.SetFrame ( None )
self.SetMargins( None )
self.SetAlignment( kwargs.get( 'alignment', self.ALIGN_TOP ) )
self.SetFlow ( kwargs.get( 'flow' , self.FLOW_LR_TB ) )
self.SetSpan ( kwargs.get( 'span', 1 ) )
self.SetStartVerticalMerge( kwargs.get( 'start_vertical_merge', False ) )
self.SetVerticalMerge ( kwargs.get( 'vertical_merge', False ) )
self._append = super( Cell, self ).append
for param in params :
if isinstance( param, StringType ) : self.append ( param )
elif isinstance( param, Paragraph ) : self.append ( param )
elif isinstance( param, FramePS ) : self.SetFrame ( param )
elif isinstance( param, MarginsPS ) : self.SetMargins( param )
def SetFrame( self, value ) :
self.Frame = value
return self
def SetMargins( self, value ) :
self.Margins = value
return self
def SetAlignment( self, value ) :
assert value in [ self.ALIGN_TOP, self.ALIGN_CENTER, self.ALIGN_BOTTOM ] #, self.ALIGN_TEXT_TOP_DOWN, self.ALIGN_TEXT_BOTTOM_UP ]
self.Alignment = value
def SetFlow( self, value ) :
assert value in [ self.FLOW_LR_TB, self.FLOW_RL_TB, self.FLOW_LR_BT, self.FLOW_VERTICAL_LR_TB, self.FLOW_VERTICAL_TB_RL ]
self.Flow = value
def SetSpan( self, value ) :
# must be a positive integer
self.Span = int( max( value, 1 ) )
return self
def SetStartVerticalMerge( self, value ) :
self.StartVerticalMerge = False
if value :
self.StartVerticalMerge = True
return self
def SetVerticalMerge( self, value ) :
self.VerticalMerge = False
if value :
self.VerticalMerge = True
return self
def append( self, *params ) :
[ self._append( param ) for param in params ]
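# Sketch: vertically merged cells are produced by marking the first cell in
# the run with start_vertical_merge and each cell below it with vertical_merge.
#
#     top   = Cell( 'merged', start_vertical_merge = True )
#     below = Cell( vertical_merge = True )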
class Document :
def __init__( self, style_sheet=None, default_language=None, view_kind=None, view_zoom_kind=None, view_scale=None ) :
self.StyleSheet = style_sheet or MakeDefaultStyleSheet()
self.Sections = AttributedList( Section )
self.SetTitle( None )
self.DefaultLanguage = default_language or Languages.DEFAULT
self.ViewKind = view_kind or ViewKind.DEFAULT
self.ViewZoomKind = view_zoom_kind
self.ViewScale = view_scale
def NewSection( self, *params, **kwargs ) :
result = Section( *params, **kwargs )
self.Sections.append( result )
return result
def SetTitle( self, value ) :
self.Title = value
return self
def Copy( self ) :
result = Document( style_sheet = self.StyleSheet.Copy(),
default_language = self.DefaultLanguage,
view_kind = self.ViewKind,
view_zoom_kind = self.ViewZoomKind,
view_scale = self.ViewScale )
result.SetTitle( self.Title )
result.Sections = self.Sections.Copy()
return result
def TEXT( *params, **kwargs ) :
text_props = TextPropertySet()
text_props.SetFont ( kwargs.get( 'font', None ) )
text_props.SetSize ( kwargs.get( 'size', None ) )
text_props.SetBold ( kwargs.get( 'bold', False ) )
text_props.SetItalic ( kwargs.get( 'italic', False ) )
text_props.SetUnderline( kwargs.get( 'underline', False ) )
text_props.SetColour ( kwargs.get( 'colour', None ) )
if len( params ) == 1 :
return Text( params[ 0 ], text_props )
result = Inline( text_props )
result.append( *params )
return result
def B( *params ) :
text_props = TextPropertySet( bold=True )
if len( params ) == 1 :
return Text( params[ 0 ], text_props )
result = Inline( text_props )
result.append( *params )
return result
def I( *params ) :
text_props = TextPropertySet( italic=True )
if len( params ) == 1 :
return Text( params[ 0 ], text_props )
result = Inline( text_props )
result.append( *params )
return result
def U( *params ) :
text_props = TextPropertySet( underline=True )
if len( params ) == 1 :
return Text( params[ 0 ], text_props )
result = Inline( text_props )
result.append( *params )
return result
| Python |
"""
A Style is a collection of PropertySets that can be applied to a particular RTF element.
At present there are only two, Text and Paragraph, but ListStyles will be added soon too.
"""
from PropertySets import *
class TextStyle :
def __init__( self, text_props, name=None, shading_props=None ) :
self.SetTextPropertySet ( text_props )
self.SetName ( name )
self.SetShadingPropertySet( shading_props )
def Copy( self ) :
return deepcopy( self )
def SetName( self, value ) :
self.Name = value
return self
def SetTextPropertySet( self, value ) :
assert isinstance( value, TextPropertySet )
self.TextPropertySet = value
return self
def SetShadingPropertySet( self, value ) :
assert value is None or isinstance( value, ShadingPropertySet )
self.ShadingPropertySet = value or ShadingPropertySet()
return self
class ParagraphStyle :
def __init__( self, name, text_style, paragraph_props=None, frame_props=None, shading_props=None ) :
# A style must have a Font and a Font Size, but the Text property set doesn't
# make these mandatory (so that it can also be used for overrides), so at this
# point we need to make sure that we have both values set
if not text_style.TextPropertySet.Font : raise Exception( 'Paragraph Styles must have a Font specified.' )
if not text_style.TextPropertySet.Size : raise Exception( 'Paragraph Styles must have a Font Size specified.' )
self.SetName ( name )
self.SetTextStyle ( text_style )
self.SetParagraphPropertySet( paragraph_props )
self.SetFramePropertySet ( frame_props )
self.SetShadingPropertySet ( shading_props )
self.SetBasedOn( None )
self.SetNext ( None )
def Copy( self ) :
return deepcopy( self )
def SetName( self, value ) :
self.Name = value
return self
def SetTextStyle( self, value ) :
assert isinstance( value, TextStyle )
self.TextStyle = value
return self
def SetParagraphPropertySet( self, value ) :
assert value is None or isinstance( value, ParagraphPropertySet )
self.ParagraphPropertySet = value or ParagraphPropertySet()
return self
def SetFramePropertySet( self, value ) :
assert value is None or isinstance( value, FramePropertySet )
self.FramePropertySet = value or FramePropertySet()
return self
def SetShadingPropertySet( self, value ) :
"""Set the background shading for the paragraph."""
assert value is None or isinstance( value, ShadingPropertySet )
self.ShadingPropertySet = value or ShadingPropertySet()
return self
def SetBasedOn( self, value ) :
"""Set the Paragraph Style that this one is based on."""
assert not value or isinstance( value, ParagraphStyle )
self.BasedOn = value
return self
def SetNext( self, value ) :
"""Set the Paragraph Style that should follow this one."""
assert not value or isinstance( value, ParagraphStyle )
self.Next = value
return self
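# A construction sketch: a ParagraphStyle needs a TextStyle whose property
# set has both a Font and a Size (RTF sizes are in half-points). The font
# here is hypothetical; real code normally reuses the style sheet's fonts.
#
#     courier = Font( 'Courier New', 'modern' )
#     text    = TextStyle( TextPropertySet( courier, 20 ) )
#     style   = ParagraphStyle( 'Code', text )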
| Python |
"""
PropertySets group common attributes together; each property set controls a specific part of the rendering.
PropertySets can be used in different elements of the document.
For example, the FramePropertySet is used in paragraphs, tables, cells, etc.
The TextPropertySet can be used for text or in a Paragraph Style.
"""
from types import StringType
from copy import deepcopy
#
# We need some basic types, like fonts, colours and paper definitions
#
def MakeAttributeName( value ) :
assert value and type( value ) is StringType
value = value.replace( ' ', '' )
return value
class AttributedList( list ) :
def __init__( self, accepted_type=None ) :
super( AttributedList, self ).__init__()
self.AcceptedType = accepted_type
self._append = super( AttributedList, self ).append
def append( self, *values ) :
for value in values :
if self.AcceptedType : assert isinstance( value, self.AcceptedType )
self._append( value )
name = getattr( value, 'Name', None )
if name :
name = MakeAttributeName( value.Name )
setattr( self, name, value )
def __deepcopy__( self, memo ) :
result = self.__class__()
result.append( *self[:] )
return result
class Colour :
def __init__( self, name, red, green, blue ) :
self.SetName ( name )
self.SetRed ( red )
self.SetGreen( green )
self.SetBlue ( blue )
def SetName( self, value ) :
self.Name = value
return self
def SetRed( self, value ) :
self.Red = value
return self
def SetGreen( self, value ) :
self.Green = value
return self
def SetBlue( self, value ) :
self.Blue = value
return self
class Colours( AttributedList ) :
def __init__( self ) :
super( Colours, self ).__init__( Colour )
class Font :
def __init__( self, name, family, character_set = 0, pitch = None, panose = None, alternate = None ) :
self.SetName ( name )
self.SetFamily ( family )
self.SetCharacterSet( character_set )
self.SetPitch ( pitch )
self.SetPanose ( panose )
self.SetAlternate ( alternate )
def SetName( self, value ) :
self.Name = value
return self
def SetFamily( self, value ) :
self.Family = value
return self
def SetCharacterSet( self, value ) :
self.CharacterSet = value
return self
def SetPitch( self, value ) :
self.Pitch = value
return self
def SetPanose( self, value ) :
self.Panose = value
return self
def SetAlternate( self, value ) :
self.Alternate = value
return self
class Fonts( AttributedList ) :
def __init__( self ) :
super( Fonts, self ).__init__( Font )
class Paper :
def __init__( self, name, code, description, width, height ) :
self.SetName ( name )
self.SetCode ( code )
self.SetDescription( description )
self.SetWidth ( width )
self.SetHeight ( height )
def SetName( self, value ) :
self.Name = value
return self
def SetCode( self, value ) :
self.Code = value
return self
def SetDescription( self, value ) :
self.Description = value
return self
def SetWidth( self, value ) :
self.Width = value
return self
def SetHeight( self, value ) :
self.Height = value
return self
class Papers( AttributedList ) :
def __init__( self ) :
super( Papers, self ).__init__( Paper )
#
# Then we have property sets which represent different aspects of Styles
#
class MarginsPropertySet :
def __init__( self, top=None, left=None, bottom=None, right=None ) :
self.SetTop ( top )
self.SetLeft ( left )
self.SetBottom( bottom )
self.SetRight ( right )
def SetTop( self, value ) :
self.Top = value
return self
def SetLeft( self, value ) :
self.Left = value
return self
def SetBottom( self, value ) :
self.Bottom = value
return self
def SetRight( self, value ) :
self.Right = value
return self
class ShadingPropertySet :
HORIZONTAL = 1
VERTICAL = 2
FORWARD_DIAGONAL = 3
BACKWARD_DIAGONAL = 4
VERTICAL_CROSS = 5
DIAGONAL_CROSS = 6
DARK_HORIZONTAL = 7
DARK_VERTICAL = 8
DARK_FORWARD_DIAGONAL = 9
DARK_BACKWARD_DIAGONAL = 10
DARK_VERTICAL_CROSS = 11
DARK_DIAGONAL_CROSS = 12
PATTERNS = [ HORIZONTAL,
VERTICAL,
FORWARD_DIAGONAL,
BACKWARD_DIAGONAL,
VERTICAL_CROSS,
DIAGONAL_CROSS,
DARK_HORIZONTAL,
DARK_VERTICAL,
DARK_FORWARD_DIAGONAL,
DARK_BACKWARD_DIAGONAL,
DARK_VERTICAL_CROSS,
DARK_DIAGONAL_CROSS ]
def __init__( self, shading=None, pattern=None, foreground=None, background=None ) :
self.SetShading ( shading )
self.SetForeground( foreground )
self.SetBackground( background )
self.SetPattern ( pattern )
def __deepcopy__( self, memo ) :
return ShadingPropertySet( self.Shading,
self.Pattern,
self.Foreground,
self.Background )
def SetShading( self, value ) :
self.Shading = value
return self
def SetPattern( self, value ) :
assert value is None or value in self.PATTERNS
self.Pattern = value
return self
def SetForeground( self, value ) :
assert not value or isinstance( value, Colour )
self.Foreground = value
return self
def SetBackground( self, value ) :
assert not value or isinstance( value, Colour )
self.Background = value
return self
class BorderPropertySet :
SINGLE = 1
DOUBLE = 2
SHADOWED = 3
DOUBLED = 4
DOTTED = 5
DASHED = 6
HAIRLINE = 7
STYLES = [ SINGLE, DOUBLE, SHADOWED, DOUBLED, DOTTED, DASHED, HAIRLINE ]
def __init__( self, width=None, style=None, colour=None, spacing=None ) :
self.SetWidth ( width )
self.SetStyle ( style or self.SINGLE )
self.SetColour ( colour )
self.SetSpacing( spacing )
def SetWidth( self, value ) :
self.Width = value
return self
def SetStyle( self, value ) :
assert value is None or value in self.STYLES
self.Style = value
return self
def SetColour( self, value ) :
assert value is None or isinstance( value, Colour )
self.Colour = value
return self
def SetSpacing( self, value ) :
self.Spacing = value
return self
class FramePropertySet :
def __init__( self, top=None, left=None, bottom=None, right=None ) :
self.SetTop ( top )
self.SetLeft ( left )
self.SetBottom( bottom )
self.SetRight ( right )
def SetTop( self, value ) :
assert value is None or isinstance( value, BorderPropertySet )
self.Top = value
return self
def SetLeft( self, value ) :
assert value is None or isinstance( value, BorderPropertySet )
self.Left = value
return self
def SetBottom( self, value ) :
assert value is None or isinstance( value, BorderPropertySet )
self.Bottom = value
return self
def SetRight( self, value ) :
assert value is None or isinstance( value, BorderPropertySet )
self.Right = value
return self
class TabPropertySet :
DEFAULT_WIDTH = 720
LEFT = 1
RIGHT = 2
CENTER = 3
DECIMAL = 4
ALIGNMENT = [ LEFT, RIGHT, CENTER, DECIMAL ]
DOTS = 1
HYPHENS = 2
UNDERLINE = 3
THICK_LINE = 4
EQUAL_SIGN = 5
LEADERS = [ DOTS, HYPHENS, UNDERLINE, THICK_LINE, EQUAL_SIGN ]
def __init__( self, width=None, alignment=None, leader=None ) :
self.SetWidth ( width )
self.SetAlignment( alignment or self.LEFT )
self.SetLeader ( leader )
def SetWidth( self, value ) :
self.Width = value
return self
def SetAlignment( self, value ) :
assert value in self.ALIGNMENT
self.Alignment = value
return self
def SetLeader( self, value ) :
assert not value or value in self.LEADERS
self.Leader = value
return self
class TextPropertySet :
def __init__( self, font=None, size=None, bold=None, italic=None, underline=None, colour=None, frame=None, expansion=None ) :
self.SetFont ( font )
self.SetSize ( size )
self.SetBold ( bold or False )
self.SetItalic ( italic or False )
self.SetUnderline ( underline or False )
self.SetColour( colour )
self.SetFrame ( frame )
self.SetStrikeThrough ( False )
self.SetDottedUnderline( False )
self.SetDoubleUnderline( False )
self.SetWordUnderline ( False )
self.SetExpansion ( expansion )
def Copy( self ) :
return deepcopy( self )
def __deepcopy__( self, memo ) :
# the font must remain a reference to the same font that we are looking at,
# so we stop the recursion at this point and return an object
# with the right references.
result = TextPropertySet( self.Font,
self.Size,
self.Bold,
self.Italic,
self.Underline,
self.Colour,
deepcopy( self.Frame, memo ),
self.Expansion )
result.SetStrikeThrough ( self.StrikeThrough )
result.SetDottedUnderline( self.DottedUnderline )
result.SetDoubleUnderline( self.DoubleUnderline )
result.SetWordUnderline ( self.WordUnderline )
return result
def SetFont( self, value ) :
assert not value or isinstance( value, Font )
self.Font = value
return self
def SetSize( self, value ) :
self.Size = value
return self
def SetBold( self, value ) :
self.Bold = False
if value : self.Bold = True
return self
def SetItalic( self, value ) :
self.Italic = False
if value : self.Italic = True
return self
def SetUnderline( self, value ) :
self.Underline = False
if value : self.Underline = True
return self
def SetColour( self, value ) :
assert value is None or isinstance( value, Colour )
self.Colour = value
return self
def SetFrame( self, value ) :
assert value is None or isinstance( value, BorderPropertySet )
self.Frame = value
return self
def SetStrikeThrough( self, value ) :
self.StrikeThrough = False
if value : self.StrikeThrough = True
return self
def SetDottedUnderline( self, value ) :
self.DottedUnderline = False
if value : self.DottedUnderline = True
return self
def SetDoubleUnderline( self, value ) :
self.DoubleUnderline = False
if value : self.DoubleUnderline = True
return self
def SetWordUnderline( self, value ) :
self.WordUnderline = False
if value : self.WordUnderline = True
return self
def SetExpansion( self, value ) :
self.Expansion = value
return self
class ParagraphPropertySet :
LEFT = 1
RIGHT = 2
CENTER = 3
JUSTIFY = 4
DISTRIBUTE = 5
ALIGNMENT = [ LEFT, RIGHT, CENTER, JUSTIFY, DISTRIBUTE ]
def __init__( self, alignment=None, space_before=None, space_after=None, tabs=None, first_line_indent=None, left_indent=None, right_indent=None, page_break_before=None ) :
self.SetAlignment ( alignment or self.LEFT )
self.SetSpaceBefore( space_before )
self.SetSpaceAfter ( space_after )
self.Tabs = []
if tabs : self.SetTabs( *tabs )
self.SetFirstLineIndent( first_line_indent or None )
self.SetLeftIndent ( left_indent or None )
self.SetRightIndent ( right_indent or None )
self.SetPageBreakBefore( page_break_before )
self.SetSpaceBetweenLines( None )
def Copy( self ) :
return deepcopy( self )
def SetAlignment( self, value ) :
assert not value or value in self.ALIGNMENT
self.Alignment = value or self.LEFT
return self
def SetSpaceBefore( self, value ) :
self.SpaceBefore = value
return self
def SetSpaceAfter( self, value ) :
self.SpaceAfter = value
return self
def SetTabs( self, *params ) :
self.Tabs = params
return self
def SetFirstLineIndent( self, value ) :
self.FirstLineIndent = value
return self
def SetLeftIndent( self, value ) :
self.LeftIndent = value
return self
def SetRightIndent( self, value ) :
self.RightIndent = value
return self
def SetSpaceBetweenLines( self, value ) :
self.SpaceBetweenLines = value
return self
def SetPageBreakBefore( self, value ) :
self.PageBreakBefore = False
if value : self.PageBreakBefore = True
return self
# Some shortcuts to make the code a bit easier to read
MarginsPS = MarginsPropertySet
ShadingPS = ShadingPropertySet
BorderPS = BorderPropertySet
FramePS = FramePropertySet
TabPS = TabPropertySet
TextPS = TextPropertySet
ParagraphPS = ParagraphPropertySet
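# Sketch: every Set* method returns self, so property sets compose with
# chained calls. A thin red frame on all four sides of an element:
#
#     red    = Colour( 'Red', 255, 0, 0 )
#     border = BorderPropertySet( width = 20, style = BorderPropertySet.SINGLE, colour = red )
#     frame  = FramePropertySet( border, border, border, border )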
| Python |
from PropertySets import *
from Elements import *
from Styles import *
from Renderer import *
def dumps(doc):
import cStringIO
s=cStringIO.StringIO()
r=Renderer()
r.Write(doc,s)
return s.getvalue()
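# A round-trip sketch (the output path is hypothetical):
#
#     doc = Document()
#     section = doc.NewSection()
#     section.append( 'Hello, World.' )
#     open( 'hello.rtf', 'w' ).write( dumps( doc ) )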
| Python |
class ViewKind :
"""An integer (0-5) that represents the view mode of the document."""
NONE = 0
PageLayout = 1
Outline = 2
MasterDocument = 3
Normal = 4
OnlineLayout = 5
DEFAULT = PageLayout
def _IsValid( cls, value ) :
return value in [ 0, 1, 2, 3, 4, 5 ]
IsValid = classmethod( _IsValid )
class ViewScale :
"""Zoom level of the document; the N argument is a value representing a percentage (the default is 100)."""
def _IsValid( cls, value ) :
return value is None or (0 < value < 101)
IsValid = classmethod( _IsValid )
class ViewZoomKind :
"""An integer (0 to 2) that represents the zoom kind of the document."""
NONE = 0
FullPage = 1
BestFit = 2
def _IsValid( cls, value ) :
return value in [ None, 0, 1, 2 ]
IsValid = classmethod( _IsValid )
class Languages :
NoLanguage = 1024
Albanian = 1052
Arabic = 1025
Bahasa = 1057
BelgianDutch = 2067
BelgianFrench = 2060
BrazilianPortuguese = 1046
Bulgarian = 1026
Catalan = 1027
CroatoSerbianLatin = 1050
Czech = 1029
Danish = 1030
Dutch = 1043
EnglishAustralian = 3081
EnglishUK = 2057
EnglishUS = 1033
Finnish = 1035
French = 1036
FrenchCanadian = 3084
German = 1031
Greek = 1032
Hebrew = 1037
Hungarian = 1038
Icelandic = 1039
Italian = 1040
Japanese = 1041
Korean = 1042
NorwegianBokmal = 1044
NorwegianNynorsk = 2068
Polish = 1045
Portuguese = 2070
RhaetoRomanic = 1047
Romanian = 1048
Russian = 1049
SerboCroatianCyrillic = 2074
SimplifiedChinese = 2052
Slovak = 1051
SpanishCastilian = 1034
SpanishMexican = 2058
Swedish = 1053
SwissFrench = 4108
SwissGerman = 2055
SwissItalian = 2064
Thai = 1054
TraditionalChinese = 1028
Turkish = 1055
Urdu = 1056
SesothoSotho = 1072
Afrikaans = 1078
Zulu = 1077
Xhosa = 1076
Venda = 1075
Tswana = 1074
Tsonga = 1073
FarsiPersian = 1065
Codes = [ 1024,
1052,
1025,
1057,
2067,
2060,
1046,
1026,
1027,
1050,
1029,
1030,
1043,
3081,
2057,
1033,
1035,
1036,
3084,
1031,
1032,
1037,
1038,
1039,
1040,
1041,
1042,
1044,
2068,
1045,
2070,
1047,
1048,
1049,
2074,
2052,
1051,
1034,
2058,
1053,
4108,
2055,
2064,
1054,
1028,
1055,
1056,
1072,
1078,
1077,
1076,
1075,
1074,
1073,
1065 ]
# make it Australian as that is what I use most of the time
DEFAULT = EnglishAustralian
def _IsValid( cls, value ) :
return value in cls.Codes
IsValid = classmethod( _IsValid )
if __name__ == '__main__' :
# PrintHexTable is not defined in this module; run a simple sanity check instead
assert Languages.IsValid( Languages.DEFAULT )
| Python |
#!/usr/bin/env python
# -*- coding: latin-1 -*-
# ****************************************************************************
# * Software: FPDF for python *
# * Version: 1.7.1 *
# * Date: 2010-09-10 *
# * Last update: 2012-08-16 *
# * License: LGPL v3.0 *
# * *
# * Original Author (PHP): Olivier PLATHEY 2004-12-31 *
# * Ported to Python 2.4 by Max (maxpat78@yahoo.it) on 2006-05 *
# * Maintainer: Mariano Reingart (reingart@gmail.com) et al since 2008 est. *
# * NOTE: 'I' and 'D' destinations are disabled, and simply print to STDOUT *
# ****************************************************************************
from datetime import datetime
import math
import errno
import os, sys, zlib, struct, re, tempfile
try:
import cPickle as pickle
except ImportError:
import pickle
# Check if PIL is available (tries importing both pypi version and corrected or manually installed versions).
# Necessary for JPEG and GIF support.
try:
try:
import Image
except:
from PIL import Image
except ImportError:
Image = None
from ttfonts import TTFontFile
from fonts import fpdf_charwidths
from php import substr, sprintf, print_r, UTF8ToUTF16BE, UTF8StringToArray
# Global variables
FPDF_VERSION = '1.7.1'
FPDF_FONT_DIR = os.path.join(os.path.dirname(__file__),'font')
SYSTEM_TTFONTS = None
PY3K = sys.version_info >= (3, 0)
def set_global(var, val):
globals()[var] = val
class FPDF(object):
"PDF Generation class"
def __init__(self, orientation='P',unit='mm',format='A4'):
# Some checks
self._dochecks()
# Initialization of properties
self.offsets={} # array of object offsets
self.page=0 # current page number
self.n=2 # current object number
self.buffer='' # buffer holding in-memory PDF
self.pages={} # array containing pages
self.orientation_changes={} # array indicating orientation changes
self.state=0 # current document state
self.fonts={} # array of used fonts
self.font_files={} # array of font files
self.diffs={} # array of encoding differences
self.images={} # array of used images
self.page_links={} # array of links in pages
self.links={} # array of internal links
self.in_footer=0 # flag set when processing footer
self.lastw=0
self.lasth=0 # height of last cell printed
self.font_family='' # current font family
self.font_style='' # current font style
self.font_size_pt=12 # current font size in points
self.underline=0 # underlining flag
self.draw_color='0 G'
self.fill_color='0 g'
self.text_color='0 g'
self.color_flag=0 # indicates whether fill and text colors are different
self.ws=0 # word spacing
self.angle=0
# Standard fonts
self.core_fonts={'courier':'Courier','courierB':'Courier-Bold','courierI':'Courier-Oblique','courierBI':'Courier-BoldOblique',
'helvetica':'Helvetica','helveticaB':'Helvetica-Bold','helveticaI':'Helvetica-Oblique','helveticaBI':'Helvetica-BoldOblique',
'times':'Times-Roman','timesB':'Times-Bold','timesI':'Times-Italic','timesBI':'Times-BoldItalic',
'symbol':'Symbol','zapfdingbats':'ZapfDingbats'}
# Scale factor
if(unit=='pt'):
self.k=1
elif(unit=='mm'):
self.k=72/25.4
elif(unit=='cm'):
self.k=72/2.54
elif(unit=='in'):
self.k=72
else:
self.error('Incorrect unit: '+unit)
# Page format
if(isinstance(format,basestring)):
format=format.lower()
if(format=='a3'):
format=(841.89,1190.55)
elif(format=='a4'):
format=(595.28,841.89)
elif(format=='a5'):
format=(420.94,595.28)
elif(format=='letter'):
format=(612,792)
elif(format=='legal'):
format=(612,1008)
else:
self.error('Unknown page format: '+format)
self.fw_pt=format[0]
self.fh_pt=format[1]
else:
self.fw_pt=format[0]*self.k
self.fh_pt=format[1]*self.k
self.fw=self.fw_pt/self.k
self.fh=self.fh_pt/self.k
# Page orientation
orientation=orientation.lower()
if(orientation=='p' or orientation=='portrait'):
self.def_orientation='P'
self.w_pt=self.fw_pt
self.h_pt=self.fh_pt
elif(orientation=='l' or orientation=='landscape'):
self.def_orientation='L'
self.w_pt=self.fh_pt
self.h_pt=self.fw_pt
else:
self.error('Incorrect orientation: '+orientation)
self.cur_orientation=self.def_orientation
self.w=self.w_pt/self.k
self.h=self.h_pt/self.k
# Page margins (1 cm)
margin=28.35/self.k
self.set_margins(margin,margin)
# Interior cell margin (1 mm)
self.c_margin=margin/10.0
# line width (0.2 mm)
self.line_width=.567/self.k
# Automatic page break
self.set_auto_page_break(1,2*margin)
# Full width display mode
self.set_display_mode('fullwidth')
# Enable compression
self.set_compression(1)
# Set default PDF version number
self.pdf_version='1.3'
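# A minimal lifecycle sketch (the output path is hypothetical):
#
#     pdf = FPDF()
#     pdf.add_page()
#     pdf.set_font('helvetica', 'B', 16)
#     pdf.cell(40, 10, 'Hello World!')
#     pdf.output('hello.pdf', 'F')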
def set_margins(self, left,top,right=-1):
"Set left, top and right margins"
self.l_margin=left
self.t_margin=top
if(right==-1):
right=left
self.r_margin=right
def set_left_margin(self, margin):
"Set left margin"
self.l_margin=margin
if(self.page>0 and self.x<margin):
self.x=margin
def set_top_margin(self, margin):
"Set top margin"
self.t_margin=margin
def set_right_margin(self, margin):
"Set right margin"
self.r_margin=margin
def set_auto_page_break(self, auto,margin=0):
"Set auto page break mode and triggering margin"
self.auto_page_break=auto
self.b_margin=margin
self.page_break_trigger=self.h-margin
def set_display_mode(self, zoom,layout='continuous'):
"Set display mode in viewer"
if(zoom=='fullpage' or zoom=='fullwidth' or zoom=='real' or zoom=='default' or not isinstance(zoom,basestring)):
self.zoom_mode=zoom
else:
self.error('Incorrect zoom display mode: '+zoom)
if(layout=='single' or layout=='continuous' or layout=='two' or layout=='default'):
self.layout_mode=layout
else:
self.error('Incorrect layout display mode: '+layout)
def set_compression(self, compress):
"Set page compression"
self.compress=compress
def set_title(self, title):
"Title of document"
self.title=title
def set_subject(self, subject):
"Subject of document"
self.subject=subject
def set_author(self, author):
"Author of document"
self.author=author
def set_keywords(self, keywords):
"Keywords of document"
self.keywords=keywords
def set_creator(self, creator):
"Creator of document"
self.creator=creator
def alias_nb_pages(self, alias='{nb}'):
"Define an alias for total number of pages"
self.str_alias_nb_pages=alias
return alias
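# Sketch: call alias_nb_pages() before adding pages, then reference the
# alias from a footer() override; '{nb}' is replaced by the page count
# when the document is output.
#
#     self.cell(0, 10, 'Page %s of {nb}' % self.page_no(), 0, 0, 'C')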
def error(self, msg):
"Fatal error"
raise RuntimeError('FPDF error: '+msg)
def open(self):
"Begin document"
self.state=1
def close(self):
"Terminate document"
if(self.state==3):
return
if(self.page==0):
self.add_page()
#Page footer
self.in_footer=1
self.footer()
self.in_footer=0
#close page
self._endpage()
#close document
self._enddoc()
def add_page(self, orientation=''):
"Start a new page"
if(self.state==0):
self.open()
family=self.font_family
if self.underline:
style = self.font_style + 'U'
else:
style = self.font_style
size=self.font_size_pt
lw=self.line_width
dc=self.draw_color
fc=self.fill_color
tc=self.text_color
cf=self.color_flag
if(self.page>0):
#Page footer
self.in_footer=1
self.footer()
self.in_footer=0
#close page
self._endpage()
#Start new page
self._beginpage(orientation)
#Set line cap style to square
self._out('2 J')
#Set line width
self.line_width=lw
self._out(sprintf('%.2f w',lw*self.k))
#Set font
if(family):
self.set_font(family,style,size)
#Set colors
self.draw_color=dc
if(dc!='0 G'):
self._out(dc)
self.fill_color=fc
if(fc!='0 g'):
self._out(fc)
self.text_color=tc
self.color_flag=cf
#Page header
self.header()
#Restore line width
if(self.line_width!=lw):
self.line_width=lw
self._out(sprintf('%.2f w',lw*self.k))
#Restore font
if(family):
self.set_font(family,style,size)
#Restore colors
if(self.draw_color!=dc):
self.draw_color=dc
self._out(dc)
if(self.fill_color!=fc):
self.fill_color=fc
self._out(fc)
self.text_color=tc
self.color_flag=cf
def header(self):
"Header to be implemented in your own inherited class"
pass
def footer(self):
"Footer to be implemented in your own inherited class"
pass
def page_no(self):
"Get current page number"
return self.page
def set_draw_color(self, r,g=-1,b=-1):
"Set color for all stroking operations"
if((r==0 and g==0 and b==0) or g==-1):
self.draw_color=sprintf('%.3f G',r/255.0)
else:
self.draw_color=sprintf('%.3f %.3f %.3f RG',r/255.0,g/255.0,b/255.0)
if(self.page>0):
self._out(self.draw_color)
def set_fill_color(self,r,g=-1,b=-1):
"Set color for all filling operations"
if((r==0 and g==0 and b==0) or g==-1):
self.fill_color=sprintf('%.3f g',r/255.0)
else:
self.fill_color=sprintf('%.3f %.3f %.3f rg',r/255.0,g/255.0,b/255.0)
self.color_flag=(self.fill_color!=self.text_color)
if(self.page>0):
self._out(self.fill_color)
def set_text_color(self, r,g=-1,b=-1):
"Set color for text"
if((r==0 and g==0 and b==0) or g==-1):
self.text_color=sprintf('%.3f g',r/255.0)
else:
self.text_color=sprintf('%.3f %.3f %.3f rg',r/255.0,g/255.0,b/255.0)
self.color_flag=(self.fill_color!=self.text_color)
def get_string_width(self, s):
"Get width of a string in the current font"
cw=self.current_font['cw']
w=0
l=len(s)
if self.unifontsubset:
for char in s:
char = ord(char)
if len(cw) > char:
w += cw[char] # ord(cw[2*char])<<8 + ord(cw[2*char+1])
#elif (char>0 and char<128 and isset($cw[chr($char)])) { $w += $cw[chr($char)]; }
elif (self.current_font['desc']['MissingWidth']) :
w += self.current_font['desc']['MissingWidth']
#elif (isset($this->CurrentFont['MissingWidth'])) { $w += $this->CurrentFont['MissingWidth']; }
else:
w += 500
else:
for i in xrange(0, l):
w += cw.get(s[i],0)
return w*self.font_size/1000.0
def set_line_width(self, width):
"Set line width"
self.line_width=width
if(self.page>0):
self._out(sprintf('%.2f w',width*self.k))
def line(self, x1,y1,x2,y2):
"Draw a line"
self._out(sprintf('%.2f %.2f m %.2f %.2f l S',x1*self.k,(self.h-y1)*self.k,x2*self.k,(self.h-y2)*self.k))
def _set_dash(self, dash_length=False, space_length=False):
if(dash_length and space_length):
s = sprintf('[%.3f %.3f] 0 d', dash_length*self.k, space_length*self.k)
else:
s = '[] 0 d'
self._out(s)
def dashed_line(self, x1,y1,x2,y2, dash_length=1, space_length=1):
"""Draw a dashed line. Same interface as line() except:
- dash_length: Length of the dash
- space_length: Length of the space between dashes"""
self._set_dash(dash_length, space_length)
self.line(x1, y1, x2, y2)
self._set_dash()
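# Sketch: a dashed horizontal rule (coordinates are in the document unit):
#
#     pdf.dashed_line(10, 50, 200, 50, dash_length=2, space_length=3)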
def rect(self, x,y,w,h,style=''):
"Draw a rectangle"
if(style=='F'):
op='f'
elif(style=='FD' or style=='DF'):
op='B'
else:
op='S'
self._out(sprintf('%.2f %.2f %.2f %.2f re %s',x*self.k,(self.h-y)*self.k,w*self.k,-h*self.k,op))
def add_font(self, family, style='', fname='', uni=False):
"Add a TrueType or Type1 font"
family = family.lower()
if (fname == ''):
fname = family.replace(' ','') + style.lower() + '.pkl'
if (family == 'arial'):
family = 'helvetica'
style = style.upper()
if (style == 'IB'):
style = 'BI'
fontkey = family+style
if fontkey in self.fonts:
# Font already added!
return
if (uni):
global SYSTEM_TTFONTS
if os.path.exists(fname):
ttffilename = fname
elif (FPDF_FONT_DIR and
os.path.exists(os.path.join(FPDF_FONT_DIR, fname))):
ttffilename = os.path.join(FPDF_FONT_DIR, fname)
elif (SYSTEM_TTFONTS and
os.path.exists(os.path.join(SYSTEM_TTFONTS, fname))):
ttffilename = os.path.join(SYSTEM_TTFONTS, fname)
else:
raise RuntimeError("TTF Font file not found: %s" % fname)
unifilename = os.path.splitext(ttffilename)[0] + '.pkl'
name = ''
if os.path.exists(unifilename):
fh = open(unifilename, 'rb')
try:
font_dict = pickle.load(fh)
finally:
fh.close()
else:
ttf = TTFontFile()
ttf.getMetrics(ttffilename)
desc = {
'Ascent': int(round(ttf.ascent, 0)),
'Descent': int(round(ttf.descent, 0)),
'CapHeight': int(round(ttf.capHeight, 0)),
'Flags': ttf.flags,
'FontBBox': "[%s %s %s %s]" % (
int(round(ttf.bbox[0], 0)),
int(round(ttf.bbox[1], 0)),
int(round(ttf.bbox[2], 0)),
int(round(ttf.bbox[3], 0))),
'ItalicAngle': int(ttf.italicAngle),
'StemV': int(round(ttf.stemV, 0)),
'MissingWidth': int(round(ttf.defaultWidth, 0)),
}
# Generate metrics .pkl file
font_dict = {
'name': re.sub('[ ()]', '', ttf.fullName),
'type': 'TTF',
'desc': desc,
'up': round(ttf.underlinePosition),
'ut': round(ttf.underlineThickness),
'ttffile': ttffilename,
'fontkey': fontkey,
'originalsize': os.stat(ttffilename).st_size,
'cw': ttf.charWidths,
}
try:
fh = open(unifilename, "wb")
pickle.dump(font_dict, fh)
fh.close()
except IOError, e:
if not e.errno == errno.EACCES:
raise # Not a permission error.
del ttf
if hasattr(self,'str_alias_nb_pages'):
sbarr = range(0,57) # include numbers in the subset!
else:
sbarr = range(0,32)
self.fonts[fontkey] = {
'i': len(self.fonts)+1, 'type': font_dict['type'],
'name': font_dict['name'], 'desc': font_dict['desc'],
'up': font_dict['up'], 'ut': font_dict['ut'],
'cw': font_dict['cw'],
'ttffile': font_dict['ttffile'], 'fontkey': fontkey,
'subset': sbarr, 'unifilename': unifilename,
}
self.font_files[fontkey] = {'length1': font_dict['originalsize'],
'type': "TTF", 'ttffile': ttffilename}
self.font_files[fname] = {'type': "TTF"}
else:
fontfile = open(fname, 'rb')
try:
font_dict = pickle.load(fontfile)
finally:
fontfile.close()
self.fonts[fontkey] = {'i': len(self.fonts)+1}
self.fonts[fontkey].update(font_dict)
diff = font_dict.get('diff')
if (diff):
#Search existing encodings
d = 0
nb = len(self.diffs)
for i in xrange(1, nb+1):
if(self.diffs[i] == diff):
d = i
break
if (d == 0):
d = nb + 1
self.diffs[d] = diff
self.fonts[fontkey]['diff'] = d
filename = font_dict.get('filename')
if (filename):
# sizes come from the metric pickle written by the font-generation tool
if (font_dict['type'] == 'TrueType'):
self.font_files[filename]={'length1': font_dict['originalsize']}
else:
self.font_files[filename]={'length1': font_dict['size1'],
'length2': font_dict['size2']}
def set_font(self, family,style='',size=0):
"Select a font; size given in points"
family=family.lower()
if(family==''):
family=self.font_family
if(family=='arial'):
family='helvetica'
elif(family=='symbol' or family=='zapfdingbats'):
style=''
style=style.upper()
if('U' in style):
self.underline=1
style=style.replace('U','')
else:
self.underline=0
if(style=='IB'):
style='BI'
if(size==0):
size=self.font_size_pt
#Test if font is already selected
if(self.font_family==family and self.font_style==style and self.font_size_pt==size):
return
#Test if used for the first time
fontkey=family+style
if fontkey not in self.fonts:
#Check if one of the standard fonts
if fontkey in self.core_fonts:
if fontkey not in fpdf_charwidths:
#Load metric file
name=os.path.join(FPDF_FONT_DIR,family)
if(family=='times' or family=='helvetica'):
name+=style.lower()
execfile(name+'.font')
if fontkey not in fpdf_charwidths:
self.error('Could not include font metric file for'+fontkey)
i=len(self.fonts)+1
self.fonts[fontkey]={'i':i,'type':'core','name':self.core_fonts[fontkey],'up':-100,'ut':50,'cw':fpdf_charwidths[fontkey]}
else:
self.error('Undefined font: '+family+' '+style)
#Select it
self.font_family=family
self.font_style=style
self.font_size_pt=size
self.font_size=size/self.k
self.current_font=self.fonts[fontkey]
self.unifontsubset = (self.fonts[fontkey]['type'] == 'TTF')
if(self.page>0):
self._out(sprintf('BT /F%d %.2f Tf ET',self.current_font['i'],self.font_size_pt))
def set_font_size(self, size):
"Set font size in points"
if(self.font_size_pt==size):
return
self.font_size_pt=size
self.font_size=size/self.k
if(self.page>0):
self._out(sprintf('BT /F%d %.2f Tf ET',self.current_font['i'],self.font_size_pt))
def add_link(self):
"Create a new internal link"
n=len(self.links)+1
self.links[n]=(0,0)
return n
def set_link(self, link,y=0,page=-1):
"Set destination of internal link"
if(y==-1):
y=self.y
if(page==-1):
page=self.page
self.links[link]=[page,y]
def link(self, x,y,w,h,link):
"Put a link on the page"
if not self.page in self.page_links:
self.page_links[self.page] = []
self.page_links[self.page] += [(x*self.k,self.h_pt-y*self.k,w*self.k,h*self.k,link),]
def text(self, x, y, txt=''):
"Output a string"
txt = self.normalize_text(txt)
if (self.unifontsubset):
txt2 = self._escape(UTF8ToUTF16BE(txt, False))
for uni in UTF8StringToArray(txt):
self.current_font['subset'].append(uni)
else:
txt2 = self._escape(txt)
s=sprintf('BT %.2f %.2f Td (%s) Tj ET',x*self.k,(self.h-y)*self.k, txt2)
if(self.underline and txt!=''):
s+=' '+self._dounderline(x,y,txt)
if(self.color_flag):
s='q '+self.text_color+' '+s+' Q'
self._out(s)
def rotate(self, angle, x=None, y=None):
if x is None:
x = self.x
if y is None:
y = self.y
if self.angle!=0:
self._out('Q')
self.angle = angle
if angle!=0:
angle *= math.pi/180
c = math.cos(angle)
s = math.sin(angle)
cx = x*self.k
cy = (self.h-y)*self.k
s = sprintf('q %.5F %.5F %.5F %.5F %.2F %.2F cm 1 0 0 1 %.2F %.2F cm',c,s,-s,c,cx,cy,-cx,-cy)
self._out(s)
def accept_page_break(self):
"Accept automatic page break or not"
return self.auto_page_break
def cell(self, w,h=0,txt='',border=0,ln=0,align='',fill=0,link=''):
"Output a cell"
txt = self.normalize_text(txt)
k=self.k
if(self.y+h>self.page_break_trigger and not self.in_footer and self.accept_page_break()):
#Automatic page break
x=self.x
ws=self.ws
if(ws>0):
self.ws=0
self._out('0 Tw')
self.add_page(self.cur_orientation)
self.x=x
if(ws>0):
self.ws=ws
self._out(sprintf('%.3f Tw',ws*k))
if(w==0):
w=self.w-self.r_margin-self.x
s=''
if(fill==1 or border==1):
if(fill==1):
if border==1:
op='B'
else:
op='f'
else:
op='S'
s=sprintf('%.2f %.2f %.2f %.2f re %s ',self.x*k,(self.h-self.y)*k,w*k,-h*k,op)
if(isinstance(border,basestring)):
x=self.x
y=self.y
if('L' in border):
s+=sprintf('%.2f %.2f m %.2f %.2f l S ',x*k,(self.h-y)*k,x*k,(self.h-(y+h))*k)
if('T' in border):
s+=sprintf('%.2f %.2f m %.2f %.2f l S ',x*k,(self.h-y)*k,(x+w)*k,(self.h-y)*k)
if('R' in border):
s+=sprintf('%.2f %.2f m %.2f %.2f l S ',(x+w)*k,(self.h-y)*k,(x+w)*k,(self.h-(y+h))*k)
if('B' in border):
s+=sprintf('%.2f %.2f m %.2f %.2f l S ',x*k,(self.h-(y+h))*k,(x+w)*k,(self.h-(y+h))*k)
if(txt!=''):
if(align=='R'):
dx=w-self.c_margin-self.get_string_width(txt)
elif(align=='C'):
dx=(w-self.get_string_width(txt))/2.0
else:
dx=self.c_margin
if(self.color_flag):
s+='q '+self.text_color+' '
# If multibyte, Tw has no effect - do word spacing using an adjustment before each space
if (self.ws and self.unifontsubset):
for uni in UTF8StringToArray(txt):
self.current_font['subset'].append(uni)
space = self._escape(UTF8ToUTF16BE(' ', False))
s += sprintf('BT 0 Tw %.2F %.2F Td [',(self.x + dx) * k,(self.h - (self.y + 0.5*h+ 0.3 * self.font_size)) * k)
t = txt.split(' ')
numt = len(t)
for i in range(numt):
tx = t[i]
tx = '(' + self._escape(UTF8ToUTF16BE(tx, False)) + ')'
s += sprintf('%s ', tx)
if ((i+1)<numt):
adj = -(self.ws * self.k) * 1000 / self.font_size_pt
s += sprintf('%d(%s) ', adj, space)
s += '] TJ'
s += ' ET'
else:
if (self.unifontsubset):
txt2 = self._escape(UTF8ToUTF16BE(txt, False))
for uni in UTF8StringToArray(txt):
self.current_font['subset'].append(uni)
else:
txt2 = self._escape(txt)
s += sprintf('BT %.2f %.2f Td (%s) Tj ET',(self.x+dx)*k,(self.h-(self.y+.5*h+.3*self.font_size))*k,txt2)
if(self.underline):
s+=' '+self._dounderline(self.x+dx,self.y+.5*h+.3*self.font_size,txt)
if(self.color_flag):
s+=' Q'
if(link):
self.link(self.x+dx,self.y+.5*h-.5*self.font_size,self.get_string_width(txt),self.font_size,link)
if(s):
self._out(s)
self.lasth=h
if(ln>0):
#Go to next line
self.y+=h
if(ln==1):
self.x=self.l_margin
else:
self.x+=w
def multi_cell(self, w, h, txt='', border=0, align='J', fill=0, split_only=False):
"Output text with automatic or explicit line breaks"
txt = self.normalize_text(txt)
ret = [] # if split_only = True, returns the split text chunks
cw=self.current_font['cw']
if(w==0):
w=self.w-self.r_margin-self.x
wmax=(w-2*self.c_margin)*1000.0/self.font_size
s=txt.replace("\r",'')
nb=len(s)
if(nb>0 and s[nb-1]=="\n"):
nb-=1
b=0
if(border):
if(border==1):
border='LTRB'
b='LRT'
b2='LR'
else:
b2=''
if('L' in border):
b2+='L'
if('R' in border):
b2+='R'
if ('T' in border):
b=b2+'T'
else:
b=b2
sep=-1
i=0
j=0
l=0
ns=0
nl=1
while(i<nb):
#Get next character
c=s[i]
if(c=="\n"):
#Explicit line break
if(self.ws>0):
self.ws=0
if not split_only:
self._out('0 Tw')
if not split_only:
self.cell(w,h,substr(s,j,i-j),b,2,align,fill)
else:
ret.append(substr(s,j,i-j))
i+=1
sep=-1
j=i
l=0
ns=0
nl+=1
if(border and nl==2):
b=b2
continue
if(c==' '):
sep=i
ls=l
ns+=1
if self.unifontsubset:
l += self.get_string_width(c) / self.font_size*1000.0
else:
l += cw.get(c,0)
if(l>wmax):
#Automatic line break
if(sep==-1):
if(i==j):
i+=1
if(self.ws>0):
self.ws=0
if not split_only:
self._out('0 Tw')
if not split_only:
self.cell(w,h,substr(s,j,i-j),b,2,align,fill)
else:
ret.append(substr(s,j,i-j))
else:
if(align=='J'):
if ns>1:
self.ws=(wmax-ls)/1000.0*self.font_size/(ns-1)
else:
self.ws=0
if not split_only:
self._out(sprintf('%.3f Tw',self.ws*self.k))
if not split_only:
self.cell(w,h,substr(s,j,sep-j),b,2,align,fill)
else:
ret.append(substr(s,j,sep-j))
i=sep+1
sep=-1
j=i
l=0
ns=0
nl+=1
if(border and nl==2):
b=b2
else:
i+=1
#Last chunk
if(self.ws>0):
self.ws=0
if not split_only:
self._out('0 Tw')
if(border and 'B' in border):
b+='B'
if not split_only:
self.cell(w,h,substr(s,j,i-j),b,2,align,fill)
self.x=self.l_margin
else:
ret.append(substr(s,j,i-j))
return ret
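# Sketch (variable names are placeholders): with split_only=True nothing is
# written; the call just returns the chunks the line-breaking would produce.
#
#     chunks = pdf.multi_cell(60, 5, some_long_text, split_only=True)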
def write(self, h, txt='', link=''):
"Output text in flowing mode"
txt = self.normalize_text(txt)
cw=self.current_font['cw']
w=self.w-self.r_margin-self.x
wmax=(w-2*self.c_margin)*1000.0/self.font_size
s=txt.replace("\r",'')
nb=len(s)
sep=-1
i=0
j=0
l=0
nl=1
while(i<nb):
#Get next character
c=s[i]
if(c=="\n"):
#Explicit line break
self.cell(w,h,substr(s,j,i-j),0,2,'',0,link)
i+=1
sep=-1
j=i
l=0
if(nl==1):
self.x=self.l_margin
w=self.w-self.r_margin-self.x
wmax=(w-2*self.c_margin)*1000.0/self.font_size
nl+=1
continue
if(c==' '):
sep=i
if self.unifontsubset:
l += self.get_string_width(c) / self.font_size*1000.0
else:
l += cw.get(c,0)
if(l>wmax):
#Automatic line break
if(sep==-1):
if(self.x>self.l_margin):
#Move to next line
self.x=self.l_margin
self.y+=h
w=self.w-self.r_margin-self.x
wmax=(w-2*self.c_margin)*1000.0/self.font_size
i+=1
nl+=1
continue
if(i==j):
i+=1
self.cell(w,h,substr(s,j,i-j),0,2,'',0,link)
else:
self.cell(w,h,substr(s,j,sep-j),0,2,'',0,link)
i=sep+1
sep=-1
j=i
l=0
if(nl==1):
self.x=self.l_margin
w=self.w-self.r_margin-self.x
wmax=(w-2*self.c_margin)*1000.0/self.font_size
nl+=1
else:
i+=1
#Last chunk
if(i!=j):
self.cell(l/1000.0*self.font_size,h,substr(s,j),0,0,'',0,link)
def image(self, name, x=None, y=None, w=0,h=0,type='',link=''):
"Put an image on the page"
if not name in self.images:
#First use of image, get info
if(type==''):
pos=name.rfind('.')
if(pos<1):
self.error('image file has no extension and no type was specified: '+name)
type=substr(name,pos+1)
type=type.lower()
if(type=='jpg' or type=='jpeg'):
info=self._parsejpg(name)
elif(type=='png'):
info=self._parsepng(name)
else:
#Allow for additional formats
#maybe the image is not showing the correct extension,
#but the header is OK,
succeed_parsing = False
#try all the parsing functions
parsing_functions = [self._parsejpg,self._parsepng,self._parsegif]
for pf in parsing_functions:
try:
info = pf(name)
succeed_parsing = True
break
except:
pass
#last resort
if not succeed_parsing:
mtd='_parse'+type
if not hasattr(self,mtd):
self.error('Unsupported image type: '+type)
info=getattr(self, mtd)(name)
info['i']=len(self.images)+1
self.images[name]=info
else:
info=self.images[name]
#Automatic width and height calculation if needed
if(w==0 and h==0):
#Put image at 72 dpi
w=info['w']/self.k
h=info['h']/self.k
elif(w==0):
w=h*info['w']/info['h']
elif(h==0):
h=w*info['h']/info['w']
# Flowing mode
if y is None:
if (self.y + h > self.page_break_trigger and not self.in_footer and self.accept_page_break()):
#Automatic page break
x = self.x
self.add_page(self.cur_orientation)
self.x = x
y = self.y
self.y += h
if x is None:
x = self.x
self._out(sprintf('q %.2f 0 0 %.2f %.2f %.2f cm /I%d Do Q',w*self.k,h*self.k,x*self.k,(self.h-(y+h))*self.k,info['i']))
if(link):
self.link(x,y,w,h,link)
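# Sketch (file name is hypothetical): w=0 and h=0 sizes the image at 72 dpi;
# giving only one of w/h scales the other to keep the aspect ratio.
#
#     pdf.image('logo.png', x=10, y=8, w=33)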
def ln(self, h=''):
"Line Feed; default value is last cell height"
self.x=self.l_margin
if(isinstance(h, basestring)):
self.y+=self.lasth
else:
self.y+=h
def get_x(self):
"Get x position"
return self.x
def set_x(self, x):
"Set x position"
if(x>=0):
self.x=x
else:
self.x=self.w+x
def get_y(self):
"Get y position"
return self.y
def set_y(self, y):
"Set y position and reset x"
self.x=self.l_margin
if(y>=0):
self.y=y
else:
self.y=self.h+y
def set_xy(self, x,y):
"Set x and y positions"
self.set_y(y)
self.set_x(x)
def output(self, name='',dest=''):
"Output PDF to some destination"
#Finish document if necessary
if(self.state<3):
self.close()
dest=dest.upper()
if(dest==''):
if(name==''):
name='doc.pdf'
dest='I'
else:
dest='F'
if dest=='I':
print self.buffer
elif dest=='D':
print self.buffer
elif dest=='F':
#Save to local file
f=open(name,'wb')
if(not f):
self.error('Unable to create output file: '+name)
if PY3K:
# TODO: proper unicode support
f.write(self.buffer.encode("latin1"))
else:
f.write(self.buffer)
f.close()
elif dest=='S':
#Return as a string
return self.buffer
else:
self.error('Incorrect output destination: '+dest)
return ''
def normalize_text(self, txt):
"Check that text input is in the correct format/encoding"
# - for TTF unicode fonts: unicode object (utf8 encoding)
# - for built-in fonts: string instances (latin 1 encoding)
if self.unifontsubset and isinstance(txt, str):
txt = txt.decode('utf8')
elif not self.unifontsubset and isinstance(txt, unicode) and not PY3K:
txt = txt.encode('latin1')
return txt
def _dochecks(self):
#Check for locale-related bug
# if(1.1==1):
# self.error("Don\'t alter the locale before including class file");
#Check for decimal separator
if(sprintf('%.1f',1.0)!='1.0'):
import locale
locale.setlocale(locale.LC_NUMERIC,'C')
def _getfontpath(self):
return FPDF_FONT_DIR+'/'
def _putpages(self):
nb=self.page
if hasattr(self,'str_alias_nb_pages'):
# Replace number of pages in fonts using subsets (unicode)
alias = UTF8ToUTF16BE(self.str_alias_nb_pages, False)
r = UTF8ToUTF16BE(str(nb), False)
for n in xrange(1, nb+1):
self.pages[n] = self.pages[n].replace(alias, r)
# Now repeat for the number of pages in non-subset fonts
for n in xrange(1,nb+1):
self.pages[n]=self.pages[n].replace(self.str_alias_nb_pages,str(nb))
if(self.def_orientation=='P'):
w_pt=self.fw_pt
h_pt=self.fh_pt
else:
w_pt=self.fh_pt
h_pt=self.fw_pt
if self.compress:
filter='/Filter /FlateDecode '
else:
filter=''
for n in xrange(1,nb+1):
#Page
self._newobj()
self._out('<</Type /Page')
self._out('/Parent 1 0 R')
if n in self.orientation_changes:
self._out(sprintf('/MediaBox [0 0 %.2f %.2f]',h_pt,w_pt))
self._out('/Resources 2 0 R')
if self.page_links and n in self.page_links:
#Links
annots='/Annots ['
for pl in self.page_links[n]:
rect=sprintf('%.2f %.2f %.2f %.2f',pl[0],pl[1],pl[0]+pl[2],pl[1]-pl[3])
annots+='<</Type /Annot /Subtype /Link /Rect ['+rect+'] /Border [0 0 0] '
if(isinstance(pl[4],basestring)):
annots+='/A <</S /URI /URI '+self._textstring(pl[4])+'>>>>'
else:
l=self.links[pl[4]]
if l[0] in self.orientation_changes:
h=w_pt
else:
h=h_pt
annots+=sprintf('/Dest [%d 0 R /XYZ 0 %.2f null]>>',1+2*l[0],h-l[1]*self.k)
self._out(annots+']')
if(self.pdf_version>'1.3'):
self._out('/Group <</Type /Group /S /Transparency /CS /DeviceRGB>>')
self._out('/Contents '+str(self.n+1)+' 0 R>>')
self._out('endobj')
#Page content
if self.compress:
p = zlib.compress(self.pages[n])
else:
p = self.pages[n]
self._newobj()
self._out('<<'+filter+'/Length '+str(len(p))+'>>')
self._putstream(p)
self._out('endobj')
#Pages root
self.offsets[1]=len(self.buffer)
self._out('1 0 obj')
self._out('<</Type /Pages')
kids='/Kids ['
for i in xrange(0,nb):
kids+=str(3+2*i)+' 0 R '
self._out(kids+']')
self._out('/Count '+str(nb))
self._out(sprintf('/MediaBox [0 0 %.2f %.2f]',w_pt,h_pt))
self._out('>>')
self._out('endobj')
def _putfonts(self):
nf=self.n
for diff in self.diffs:
#Encodings
self._newobj()
self._out('<</Type /Encoding /BaseEncoding /WinAnsiEncoding /Differences ['+self.diffs[diff]+']>>')
self._out('endobj')
for name,info in self.font_files.iteritems():
if 'type' in info and info['type'] != 'TTF':
#Font file embedding
self._newobj()
self.font_files[name]['n']=self.n
font=''
f=open(self._getfontpath()+name,'rb',1)
if(not f):
self.error('Font file not found')
font=f.read()
f.close()
compressed=(substr(name,-2)=='.z')
if(not compressed and 'length2' in info):
header=(ord(font[0])==128)
if(header):
#Strip first binary header
font=substr(font,6)
if(header and ord(font[info['length1']])==128):
#Strip second binary header
font=substr(font,0,info['length1'])+substr(font,info['length1']+6)
self._out('<</Length '+str(len(font)))
if(compressed):
self._out('/Filter /FlateDecode')
self._out('/Length1 '+str(info['length1']))
if('length2' in info):
self._out('/Length2 '+str(info['length2'])+' /Length3 0')
self._out('>>')
self._putstream(font)
self._out('endobj')
for k,font in self.fonts.iteritems():
#Font objects
self.fonts[k]['n']=self.n+1
type=font['type']
name=font['name']
if(type=='core'):
#Standard font
self._newobj()
self._out('<</Type /Font')
self._out('/BaseFont /'+name)
self._out('/Subtype /Type1')
if(name!='Symbol' and name!='ZapfDingbats'):
self._out('/Encoding /WinAnsiEncoding')
self._out('>>')
self._out('endobj')
elif(type=='Type1' or type=='TrueType'):
#Additional Type1 or TrueType font
self._newobj()
self._out('<</Type /Font')
self._out('/BaseFont /'+name)
self._out('/Subtype /'+type)
self._out('/FirstChar 32 /LastChar 255')
self._out('/Widths '+str(self.n+1)+' 0 R')
self._out('/FontDescriptor '+str(self.n+2)+' 0 R')
if(font['enc']):
if('diff' in font):
self._out('/Encoding '+str(nf+font['diff'])+' 0 R')
else:
self._out('/Encoding /WinAnsiEncoding')
self._out('>>')
self._out('endobj')
#Widths
self._newobj()
cw=font['cw']
s='['
for i in xrange(32,256):
# dict.get() doesn't raise; the 'or 0' turns a missing (None) width into 0
s+=str(cw.get(chr(i)) or 0)+' '
self._out(s+']')
self._out('endobj')
#Descriptor
self._newobj()
s='<</Type /FontDescriptor /FontName /'+name
for k in ('Ascent', 'Descent', 'CapHeight', 'Flags', 'FontBBox', 'ItalicAngle', 'StemV', 'MissingWidth'):
s += ' /%s %s' % (k, font['desc'][k])
filename=font['file']
if(filename):
s+=' /FontFile'
if type!='Type1':
s+='2'
s+=' '+str(self.font_files[filename]['n'])+' 0 R'
self._out(s+'>>')
self._out('endobj')
elif (type == 'TTF'):
self.fonts[k]['n'] = self.n + 1
ttf = TTFontFile()
fontname = 'MPDFAA' + '+' + font['name']
subset = font['subset']
del subset[0]
ttfontstream = ttf.makeSubset(font['ttffile'], subset)
ttfontsize = len(ttfontstream)
fontstream = zlib.compress(ttfontstream)
codeToGlyph = ttf.codeToGlyph
##del codeToGlyph[0]
# Type0 Font
# A composite font - a font composed of other fonts, organized hierarchically
self._newobj()
self._out('<</Type /Font')
self._out('/Subtype /Type0')
self._out('/BaseFont /' + fontname + '')
self._out('/Encoding /Identity-H')
self._out('/DescendantFonts [' + str(self.n + 1) + ' 0 R]')
self._out('/ToUnicode ' + str(self.n + 2) + ' 0 R')
self._out('>>')
self._out('endobj')
# CIDFontType2
# A CIDFont whose glyph descriptions are based on TrueType font technology
self._newobj()
self._out('<</Type /Font')
self._out('/Subtype /CIDFontType2')
self._out('/BaseFont /' + fontname + '')
self._out('/CIDSystemInfo ' + str(self.n + 2) + ' 0 R')
self._out('/FontDescriptor ' + str(self.n + 3) + ' 0 R')
if (font['desc'].get('MissingWidth')):
self._out('/DW %d' % font['desc']['MissingWidth'])
self._putTTfontwidths(font, ttf.maxUni)
self._out('/CIDToGIDMap ' + str(self.n + 4) + ' 0 R')
self._out('>>')
self._out('endobj')
# ToUnicode
self._newobj()
toUni = "/CIDInit /ProcSet findresource begin\n" \
"12 dict begin\n" \
"begincmap\n" \
"/CIDSystemInfo\n" \
"<</Registry (Adobe)\n" \
"/Ordering (UCS)\n" \
"/Supplement 0\n" \
">> def\n" \
"/CMapName /Adobe-Identity-UCS def\n" \
"/CMapType 2 def\n" \
"1 begincodespacerange\n" \
"<0000> <FFFF>\n" \
"endcodespacerange\n" \
"1 beginbfrange\n" \
"<0000> <FFFF> <0000>\n" \
"endbfrange\n" \
"endcmap\n" \
"CMapName currentdict /CMap defineresource pop\n" \
"end\n" \
"end"
self._out('<</Length ' + str(len(toUni)) + '>>')
self._putstream(toUni)
self._out('endobj')
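# The CMap written above is an identity ToUnicode map covering the whole
# <0000>-<FFFF> code space. That appears to be correct here because the
# subset is written with CID == Unicode code point (see the CIDToGIDMap
# built from codeToGlyph below), so text extraction maps straight back to
# the original characters.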
# CIDSystemInfo dictionary
self._newobj()
self._out('<</Registry (Adobe)')
self._out('/Ordering (UCS)')
self._out('/Supplement 0')
self._out('>>')
self._out('endobj')
# Font descriptor
self._newobj()
self._out('<</Type /FontDescriptor')
self._out('/FontName /' + fontname)
for kd in ('Ascent', 'Descent', 'CapHeight', 'Flags', 'FontBBox', 'ItalicAngle', 'StemV', 'MissingWidth'):
v = font['desc'][kd]
if (kd == 'Flags'):
v = v | 4    # set the Symbolic flag (bit 3)
v = v & ~32  # clear the Nonsymbolic flag (bit 6)
self._out(' /%s %s' % (kd, v))
self._out('/FontFile2 ' + str(self.n + 2) + ' 0 R')
self._out('>>')
self._out('endobj')
# Embed CIDToGIDMap
# A specification of the mapping from CIDs to glyph indices
# 2 bytes per CID for 65536 CIDs, big-endian glyph index, all zero initially
cidtogidmap = ["\x00"] * (256*256*2)
for cc, glyph in codeToGlyph.items():
cidtogidmap[cc*2] = chr(glyph >> 8)
cidtogidmap[cc*2 + 1] = chr(glyph & 0xFF)
cidtogidmap = zlib.compress(''.join(cidtogidmap))
self._newobj()
self._out('<</Length ' + str(len(cidtogidmap)) + '')
self._out('/Filter /FlateDecode')
self._out('>>')
self._putstream(cidtogidmap)
self._out('endobj')
#Font file
self._newobj()
self._out('<</Length ' + str(len(fontstream)))
self._out('/Filter /FlateDecode')
self._out('/Length1 ' + str(ttfontsize))
self._out('>>')
self._putstream(fontstream)
self._out('endobj')
del ttf
else:
#Allow for additional types
mtd='_put'+type.lower()
if(not method_exists(self,mtd)):
self.error('Unsupported font type: '+type)
getattr(self, mtd)(font)
def _putTTfontwidths(self, font, maxUni):
cw127fname = os.path.splitext(font['unifilename'])[0] + '.cw127.pkl'
if (os.path.exists(cw127fname)):
fh = open(cw127fname, "rb")
try:
font_dict = pickle.load(fh)
finally:
fh.close()
rangeid = font_dict['rangeid']
range_ = font_dict['range']
prevcid = font_dict['prevcid']
prevwidth = font_dict['prevwidth']
interval = font_dict['interval']
range_interval = font_dict['range_interval']
startcid = 128
else:
rangeid = 0
range_ = {}
range_interval = {}
prevcid = -2
prevwidth = -1
interval = False
startcid = 1
cwlen = maxUni + 1
# for each character
for cid in range(startcid, cwlen):
if (cid==128 and not os.path.exists(cw127fname)):
try:
fh = open(cw127fname, "wb")
font_dict = {}
font_dict['rangeid'] = rangeid
font_dict['prevcid'] = prevcid
font_dict['prevwidth'] = prevwidth
font_dict['interval'] = interval
font_dict['range_interval'] = range_interval
font_dict['range'] = range_
pickle.dump(font_dict, fh)
fh.close()
except IOError, e:
if e.errno != errno.EACCES:
raise  # re-raise anything that isn't a permission error
if (font['cw'][cid] == 0):
continue
width = font['cw'][cid]
if (width == 65535): width = 0
if ((cid > 255 and cid not in font['subset']) or not cid):
continue
if ('dw' not in font or (font['dw'] and width != font['dw'])):
if (cid == (prevcid + 1)):
if (width == prevwidth):
if (width == range_[rangeid][0]):
range_.setdefault(rangeid, []).append(width)
else:
range_[rangeid].pop()
# new range
rangeid = prevcid
range_[rangeid] = [prevwidth, width]
interval = True
range_interval[rangeid] = True
else:
if (interval):
# new range
rangeid = cid
range_[rangeid] = [width]
else:
range_[rangeid].append(width)
interval = False
else:
rangeid = cid
range_[rangeid] = [width]
interval = False
prevcid = cid
prevwidth = width
prevk = -1
nextk = -1
prevint = False
for k, ws in sorted(range_.items()):
cws = len(ws)
if (k == nextk and not prevint and (not k in range_interval or cws < 3)):
if (k in range_interval):
del range_interval[k]
range_[prevk] = range_[prevk] + range_[k]
del range_[k]
else:
prevk = k
nextk = k + cws
if (k in range_interval):
prevint = (cws > 3)
del range_interval[k]
nextk -= 1
else:
prevint = False
w = []
for k, ws in sorted(range_.items()):
if (len(set(ws)) == 1):
w.append(' %s %s %s' % (k, k + len(ws) - 1, ws[0]))
else:
w.append(' %s [ %s ]\n' % (k, ' '.join([str(int(h)) for h in ws])))
self._out('/W [%s]' % ''.join(w))
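# The /W array built above uses both CIDFont width forms from the PDF spec:
# "c [w1 w2 ...]" lists widths for consecutive CIDs starting at c, while
# "cfirst clast w" gives a single width for a whole range, e.g.
#   /W [ 32 [600 600 600] 65 90 500 ]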
def _putimages(self):
filter=''
if self.compress:
filter='/Filter /FlateDecode '
for filename,info in self.images.iteritems():
self._putimage(info)
del info['data']
if 'smask' in info:
del info['smask']
def _putimage(self, info):
if 'data' in info:
self._newobj()
info['n']=self.n
self._out('<</Type /XObject')
self._out('/Subtype /Image')
self._out('/Width '+str(info['w']))
self._out('/Height '+str(info['h']))
if(info['cs']=='Indexed'):
self._out('/ColorSpace [/Indexed /DeviceRGB '+str(len(info['pal'])/3-1)+' '+str(self.n+1)+' 0 R]')
else:
self._out('/ColorSpace /'+info['cs'])
if(info['cs']=='DeviceCMYK'):
self._out('/Decode [1 0 1 0 1 0 1 0]')
self._out('/BitsPerComponent '+str(info['bpc']))
if 'f' in info:
self._out('/Filter /'+info['f'])
if 'dp' in info:
self._out('/DecodeParms <<' + info['dp'] + '>>')
if('trns' in info and isinstance(info['trns'], list)):
trns=''
for i in xrange(0,len(info['trns'])):
trns+=str(info['trns'][i])+' '+str(info['trns'][i])+' '
self._out('/Mask ['+trns+']')
if('smask' in info):
self._out('/SMask ' + str(self.n+1) + ' 0 R')
self._out('/Length '+str(len(info['data']))+'>>')
self._putstream(info['data'])
self._out('endobj')
# Soft mask
if('smask' in info):
dp = '/Predictor 15 /Colors 1 /BitsPerComponent 8 /Columns ' + str(info['w'])
smask = {'w': info['w'], 'h': info['h'], 'cs': 'DeviceGray', 'bpc': 8, 'f': info['f'], 'dp': dp, 'data': info['smask']}
self._putimage(smask)
#Palette
if(info['cs']=='Indexed'):
self._newobj()
filter = '/Filter /FlateDecode ' if self.compress else ''
if self.compress:
pal=zlib.compress(info['pal'])
else:
pal=info['pal']
self._out('<<'+filter+'/Length '+str(len(pal))+'>>')
self._putstream(pal)
self._out('endobj')
def _putxobjectdict(self):
for image in self.images.values():
self._out('/I'+str(image['i'])+' '+str(image['n'])+' 0 R')
def _putresourcedict(self):
self._out('/ProcSet [/PDF /Text /ImageB /ImageC /ImageI]')
self._out('/Font <<')
for font in self.fonts.values():
self._out('/F'+str(font['i'])+' '+str(font['n'])+' 0 R')
self._out('>>')
self._out('/XObject <<')
self._putxobjectdict()
self._out('>>')
def _putresources(self):
self._putfonts()
self._putimages()
#Resource dictionary
self.offsets[2]=len(self.buffer)
self._out('2 0 obj')
self._out('<<')
self._putresourcedict()
self._out('>>')
self._out('endobj')
def _putinfo(self):
self._out('/Producer '+self._textstring('PyFPDF '+FPDF_VERSION+' http://pyfpdf.googlecode.com/'))
if hasattr(self,'title'):
self._out('/Title '+self._textstring(self.title))
if hasattr(self,'subject'):
self._out('/Subject '+self._textstring(self.subject))
if hasattr(self,'author'):
self._out('/Author '+self._textstring(self.author))
if hasattr (self,'keywords'):
self._out('/Keywords '+self._textstring(self.keywords))
if hasattr(self,'creator'):
self._out('/Creator '+self._textstring(self.creator))
self._out('/CreationDate '+self._textstring('D:'+datetime.now().strftime('%Y%m%d%H%M%S')))
def _putcatalog(self):
self._out('/Type /Catalog')
self._out('/Pages 1 0 R')
if(self.zoom_mode=='fullpage'):
self._out('/OpenAction [3 0 R /Fit]')
elif(self.zoom_mode=='fullwidth'):
self._out('/OpenAction [3 0 R /FitH null]')
elif(self.zoom_mode=='real'):
self._out('/OpenAction [3 0 R /XYZ null null 1]')
elif(not isinstance(self.zoom_mode,basestring)):
self._out('/OpenAction [3 0 R /XYZ null null '+str(self.zoom_mode/100.0)+']')
if(self.layout_mode=='single'):
self._out('/PageLayout /SinglePage')
elif(self.layout_mode=='continuous'):
self._out('/PageLayout /OneColumn')
elif(self.layout_mode=='two'):
self._out('/PageLayout /TwoColumnLeft')
def _putheader(self):
self._out('%PDF-'+self.pdf_version)
def _puttrailer(self):
self._out('/Size '+str(self.n+1))
self._out('/Root '+str(self.n)+' 0 R')
self._out('/Info '+str(self.n-1)+' 0 R')
def _enddoc(self):
self._putheader()
self._putpages()
self._putresources()
#Info
self._newobj()
self._out('<<')
self._putinfo()
self._out('>>')
self._out('endobj')
#Catalog
self._newobj()
self._out('<<')
self._putcatalog()
self._out('>>')
self._out('endobj')
#Cross-ref
o=len(self.buffer)
self._out('xref')
self._out('0 '+(str(self.n+1)))
self._out('0000000000 65535 f ')
for i in xrange(1,self.n+1):
self._out(sprintf('%010d 00000 n ',self.offsets[i]))
#Trailer
self._out('trailer')
self._out('<<')
self._puttrailer()
self._out('>>')
self._out('startxref')
self._out(o)
self._out('%%EOF')
self.state=3
def _beginpage(self, orientation):
self.page+=1
self.pages[self.page]=''
self.state=2
self.x=self.l_margin
self.y=self.t_margin
self.font_family=''
#Page orientation
if(not orientation):
orientation=self.def_orientation
else:
orientation=orientation[0].upper()
if(orientation!=self.def_orientation):
self.orientation_changes[self.page]=1
if(orientation!=self.cur_orientation):
#Change orientation
if(orientation=='P'):
self.w_pt=self.fw_pt
self.h_pt=self.fh_pt
self.w=self.fw
self.h=self.fh
else:
self.w_pt=self.fh_pt
self.h_pt=self.fw_pt
self.w=self.fh
self.h=self.fw
self.page_break_trigger=self.h-self.b_margin
self.cur_orientation=orientation
def _endpage(self):
#End of page contents
self.state=1
def _newobj(self):
#Begin a new object
self.n+=1
self.offsets[self.n]=len(self.buffer)
self._out(str(self.n)+' 0 obj')
def _dounderline(self, x,y,txt):
#Underline text
up=self.current_font['up']
ut=self.current_font['ut']
w=self.get_string_width(txt)+self.ws*txt.count(' ')
return sprintf('%.2f %.2f %.2f %.2f re f',x*self.k,(self.h-(y-up/1000.0*self.font_size))*self.k,w*self.k,-ut/1000.0*self.font_size_pt)
def _parsejpg(self, filename):
# Extract info from a JPEG file
if Image is None:
self.error('PIL not installed')
try:
f = open(filename, 'rb')
im = Image.open(f)
except Exception, e:
self.error('Missing or incorrect image file: %s. error: %s' % (filename, str(e)))
else:
a = im.size
# JPEG is effectively always 8 bits per component, so default bpc to 8
bpc=8
if im.mode == 'RGB':
colspace='DeviceRGB'
elif im.mode == 'CMYK':
colspace='DeviceCMYK'
else:
colspace='DeviceGray'
# Read whole file from the start
f.seek(0)
data = f.read()
f.close()
return {'w':a[0],'h':a[1],'cs':colspace,'bpc':bpc,'f':'DCTDecode','data':data}
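# Note: the JPEG bytes are embedded verbatim and tagged /DCTDecode, so the
# PDF viewer decompresses the image itself; only the size, color space and
# bit depth have to be sniffed here via PIL.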
def _parsegif(self, filename):
# Extract info from a GIF file (via PNG conversion)
if Image is None:
self.error('PIL is required for GIF support')
try:
im = Image.open(filename)
except Exception, e:
self.error('Missing or incorrect image file: %s. error: %s' % (filename, str(e)))
else:
# Use temporary file
f = tempfile.NamedTemporaryFile(delete=False, suffix=".png")
tmp = f.name
f.close()
if "transparency" in im.info:
im.save(tmp, transparency = im.info['transparency'])
else:
im.save(tmp)
info = self._parsepng(tmp)
os.unlink(tmp)
return info
def _parsepng(self, name):
#Extract info from a PNG file
if name.startswith("http://") or name.startswith("https://"):
import urllib
f = urllib.urlopen(name)
else:
f=open(name,'rb')
if(not f):
self.error("Can't open image file: "+name)
#Check signature
if(f.read(8)!='\x89PNG\r\n\x1a\n'):
self.error('Not a PNG file: '+name)
#Read header chunk
f.read(4)
if(f.read(4)!='IHDR'):
self.error('Incorrect PNG file: '+name)
w=self._freadint(f)
h=self._freadint(f)
bpc=ord(f.read(1))
if(bpc>8):
self.error('16-bit depth not supported: '+name)
ct=ord(f.read(1))
if(ct==0 or ct==4):
colspace='DeviceGray'
elif(ct==2 or ct==6):
colspace='DeviceRGB'
elif(ct==3):
colspace='Indexed'
else:
self.error('Unknown color type: '+name)
if(ord(f.read(1))!=0):
self.error('Unknown compression method: '+name)
if(ord(f.read(1))!=0):
self.error('Unknown filter method: '+name)
if(ord(f.read(1))!=0):
self.error('Interlacing not supported: '+name)
f.read(4)
dp='/Predictor 15 /Colors '
if colspace == 'DeviceRGB':
dp+='3'
else:
dp+='1'
dp+=' /BitsPerComponent '+str(bpc)+' /Columns '+str(w)+''
#Scan chunks looking for palette, transparency and image data
pal=''
trns=''
data=''
n=1
while n is not None:
n=self._freadint(f)
type=f.read(4)
if(type=='PLTE'):
#Read palette
pal=f.read(n)
f.read(4)
elif(type=='tRNS'):
#Read transparency info
t=f.read(n)
if(ct==0):
trns=[ord(substr(t,1,1)),]
elif(ct==2):
trns=[ord(substr(t,1,1)),ord(substr(t,3,1)),ord(substr(t,5,1))]
else:
pos=t.find('\x00')
if(pos!=-1):
trns=[pos,]
f.read(4)
elif(type=='IDAT'):
#Read image data block
data+=f.read(n)
f.read(4)
elif(type=='IEND'):
break
else:
f.read(n+4)
if(colspace=='Indexed' and not pal):
self.error('Missing palette in '+name)
f.close()
info = {'w':w,'h':h,'cs':colspace,'bpc':bpc,'f':'FlateDecode','dp':dp,'pal':pal,'trns':trns,}
if(ct>=4):
# Extract alpha channel
data = zlib.decompress(data)
color = ''
alpha = ''
if(ct==4):
# Gray image
length = 2*w
for i in range(h):
pos = (1+length)*i
color += data[pos]
alpha += data[pos]
line = substr(data, pos+1, length)
color += re.sub('(.).',lambda m: m.group(1),line, flags=re.DOTALL)
alpha += re.sub('.(.)',lambda m: m.group(1),line, flags=re.DOTALL)
else:
# RGB image
length = 4*w
for i in range(h):
pos = (1+length)*i
color += data[pos]
alpha += data[pos]
line = substr(data, pos+1, length)
color += re.sub('(.{3}).',lambda m: m.group(1),line, flags=re.DOTALL)
alpha += re.sub('.{3}(.)',lambda m: m.group(1),line, flags=re.DOTALL)
del data
data = zlib.compress(color)
info['smask'] = zlib.compress(alpha)
if (self.pdf_version < '1.4'):
self.pdf_version = '1.4'
info['data'] = data
return info
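# PNG chunks are read with the standard layout: 4-byte big-endian length,
# 4-byte type, payload, 4-byte CRC (the CRC is skipped via f.read(4)).
# When a color type >= 4 carries alpha (4 = gray+alpha, 6 = RGBA), the
# alpha plane is split into a separate /SMask stream, which is why
# pdf_version is bumped to 1.4 -- soft masks only exist from PDF 1.4 on.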
def _freadint(self, f):
#Read a 4-byte integer from file
try:
return struct.unpack('>I', f.read(4))[0]
except:
return None
def _textstring(self, s):
#Format a text string
return '('+self._escape(s)+')'
def _escape(self, s):
#Add \ before \, ( and )
return s.replace('\\','\\\\').replace(')','\\)').replace('(','\\(').replace('\r','\\r')
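# Example: _textstring('a(b)') returns '(a\(b\))' -- backslashes and
# parentheses must be escaped so the result is a valid PDF literal string.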
def _putstream(self, s):
self._out('stream')
self._out(s)
self._out('endstream')
def _out(self, s):
#Add a line to the document
if(self.state==2):
self.pages[self.page]+=s+"\n"
else:
self.buffer+=str(s)+"\n"
def interleaved2of5(self, txt, x, y, w=1.0, h=10.0):
"Barcode I2of5 (numeric), adds a 0 if odd lenght"
narrow = w / 3.0
wide = w
# wide/narrow codes for the digits
bar_char={'0': 'nnwwn', '1': 'wnnnw', '2': 'nwnnw', '3': 'wwnnn',
'4': 'nnwnw', '5': 'wnwnn', '6': 'nwwnn', '7': 'nnnww',
'8': 'wnnwn', '9': 'nwnwn', 'A': 'nn', 'Z': 'wn'}
self.set_fill_color(0)
code = txt
# add leading zero if code-length is odd
if len(code) % 2 != 0:
code = '0' + code
# add start and stop codes
code = 'AA' + code.lower() + 'ZA'
for i in xrange(0, len(code), 2):
# choose next pair of digits
char_bar = code[i]
char_space = code[i+1]
# check whether it is a valid digit
if char_bar not in bar_char:
raise RuntimeError('Char "%s" invalid for I25' % char_bar)
if char_space not in bar_char:
raise RuntimeError('Char "%s" invalid for I25' % char_space)
# create a wide/narrow-seq (first digit=bars, second digit=spaces)
seq = ''
for s in xrange(0, len(bar_char[char_bar])):
seq += bar_char[char_bar][s] + bar_char[char_space][s]
for bar in xrange(0, len(seq)):
# set line_width depending on value
if seq[bar] == 'n':
line_width = narrow
else:
line_width = wide
# draw every second value, the other is represented by space
if bar % 2 == 0:
self.rect(x, y, line_width, h, 'F')
x += line_width
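# Example: interleaved2of5('123', ...) pads the code to '0123', wraps it as
# 'AA0123ZA', then encodes each digit pair by interleaving the first
# digit's pattern as bars with the second digit's pattern as spaces
# ('n' = narrow, 'w' = wide, with wide = 3 * narrow since narrow = w / 3).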
def code39(self, txt, x, y, w=1.5, h=5.0):
"Barcode 3of9"
wide = w
narrow = w / 3.0
gap = narrow
bar_char={'0': 'nnnwwnwnn', '1': 'wnnwnnnnw', '2': 'nnwwnnnnw',
'3': 'wnwwnnnnn', '4': 'nnnwwnnnw', '5': 'wnnwwnnnn',
'6': 'nnwwwnnnn', '7': 'nnnwnnwnw', '8': 'wnnwnnwnn',
'9': 'nnwwnnwnn', 'A': 'wnnnnwnnw', 'B': 'nnwnnwnnw',
'C': 'wnwnnwnnn', 'D': 'nnnnwwnnw', 'E': 'wnnnwwnnn',
'F': 'nnwnwwnnn', 'G': 'nnnnnwwnw', 'H': 'wnnnnwwnn',
'I': 'nnwnnwwnn', 'J': 'nnnnwwwnn', 'K': 'wnnnnnnww',
'L': 'nnwnnnnww', 'M': 'wnwnnnnwn', 'N': 'nnnnwnnww',
'O': 'wnnnwnnwn', 'P': 'nnwnwnnwn', 'Q': 'nnnnnnwww',
'R': 'wnnnnnwwn', 'S': 'nnwnnnwwn', 'T': 'nnnnwnwwn',
'U': 'wwnnnnnnw', 'V': 'nwwnnnnnw', 'W': 'wwwnnnnnn',
'X': 'nwnnwnnnw', 'Y': 'wwnnwnnnn', 'Z': 'nwwnwnnnn',
'-': 'nwnnnnwnw', '.': 'wwnnnnwnn', ' ': 'nwwnnnwnn',
'*': 'nwnnwnwnn', '$': 'nwnwnwnnn', '/': 'nwnwnnnwn',
'+': 'nwnnnwnwn', '%': 'nnnwnwnwn'}
self.set_fill_color(0)
code = txt
code = code.upper()
# unlike I2of5, Code 39 encodes each character on its own (9 bar/space
# elements per char), so iterate one character at a time
for i in xrange(0, len(code)):
char_bar = code[i]
if char_bar not in bar_char:
raise RuntimeError('Char "%s" invalid for Code39' % char_bar)
seq= ''
for s in xrange(0, len(bar_char[char_bar])):
seq += bar_char[char_bar][s]
for bar in xrange(0, len(seq)):
if seq[bar] == 'n':
line_width = narrow
else:
line_width = wide
if bar % 2 == 0:
self.rect(x, y, line_width, h, 'F')
x += line_width
x += gap
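# Code 39 adds no start/stop delimiters itself; the character table
# includes '*', so callers are presumably expected to pass them in the
# text, e.g. pdf.code39('*CODE39*', x=10, y=10).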
| Python |
#!/usr/bin/env python
# -*- coding: latin-1 -*-
# Fonts:
fpdf_charwidths = {}
fpdf_charwidths['courier']={}
for i in xrange(0,256):
fpdf_charwidths['courier'][chr(i)]=600
fpdf_charwidths['courierB']=fpdf_charwidths['courier']
fpdf_charwidths['courierI']=fpdf_charwidths['courier']
fpdf_charwidths['courierBI']=fpdf_charwidths['courier']
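# Courier is monospaced (every glyph is 600/1000 em), so the bold/italic
# variants can safely alias the same width dict -- it is only ever read.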
fpdf_charwidths['helvetica']={
'\x00':278,'\x01':278,'\x02':278,'\x03':278,'\x04':278,'\x05':278,'\x06':278,'\x07':278,'\x08':278,'\t':278,'\n':278,'\x0b':278,'\x0c':278,'\r':278,'\x0e':278,'\x0f':278,'\x10':278,'\x11':278,'\x12':278,'\x13':278,'\x14':278,'\x15':278,
'\x16':278,'\x17':278,'\x18':278,'\x19':278,'\x1a':278,'\x1b':278,'\x1c':278,'\x1d':278,'\x1e':278,'\x1f':278,' ':278,'!':278,'"':355,'#':556,'$':556,'%':889,'&':667,'\'':191,'(':333,')':333,'*':389,'+':584,
',':278,'-':333,'.':278,'/':278,'0':556,'1':556,'2':556,'3':556,'4':556,'5':556,'6':556,'7':556,'8':556,'9':556,':':278,';':278,'<':584,'=':584,'>':584,'?':556,'@':1015,'A':667,
'B':667,'C':722,'D':722,'E':667,'F':611,'G':778,'H':722,'I':278,'J':500,'K':667,'L':556,'M':833,'N':722,'O':778,'P':667,'Q':778,'R':722,'S':667,'T':611,'U':722,'V':667,'W':944,
'X':667,'Y':667,'Z':611,'[':278,'\\':278,']':278,'^':469,'_':556,'`':333,'a':556,'b':556,'c':500,'d':556,'e':556,'f':278,'g':556,'h':556,'i':222,'j':222,'k':500,'l':222,'m':833,
'n':556,'o':556,'p':556,'q':556,'r':333,'s':500,'t':278,'u':556,'v':500,'w':722,'x':500,'y':500,'z':500,'{':334,'|':260,'}':334,'~':584,'\x7f':350,'\x80':556,'\x81':350,'\x82':222,'\x83':556,
'\x84':333,'\x85':1000,'\x86':556,'\x87':556,'\x88':333,'\x89':1000,'\x8a':667,'\x8b':333,'\x8c':1000,'\x8d':350,'\x8e':611,'\x8f':350,'\x90':350,'\x91':222,'\x92':222,'\x93':333,'\x94':333,'\x95':350,'\x96':556,'\x97':1000,'\x98':333,'\x99':1000,
'\x9a':500,'\x9b':333,'\x9c':944,'\x9d':350,'\x9e':500,'\x9f':667,'\xa0':278,'\xa1':333,'\xa2':556,'\xa3':556,'\xa4':556,'\xa5':556,'\xa6':260,'\xa7':556,'\xa8':333,'\xa9':737,'\xaa':370,'\xab':556,'\xac':584,'\xad':333,'\xae':737,'\xaf':333,
'\xb0':400,'\xb1':584,'\xb2':333,'\xb3':333,'\xb4':333,'\xb5':556,'\xb6':537,'\xb7':278,'\xb8':333,'\xb9':333,'\xba':365,'\xbb':556,'\xbc':834,'\xbd':834,'\xbe':834,'\xbf':611,'\xc0':667,'\xc1':667,'\xc2':667,'\xc3':667,'\xc4':667,'\xc5':667,
'\xc6':1000,'\xc7':722,'\xc8':667,'\xc9':667,'\xca':667,'\xcb':667,'\xcc':278,'\xcd':278,'\xce':278,'\xcf':278,'\xd0':722,'\xd1':722,'\xd2':778,'\xd3':778,'\xd4':778,'\xd5':778,'\xd6':778,'\xd7':584,'\xd8':778,'\xd9':722,'\xda':722,'\xdb':722,
'\xdc':722,'\xdd':667,'\xde':667,'\xdf':611,'\xe0':556,'\xe1':556,'\xe2':556,'\xe3':556,'\xe4':556,'\xe5':556,'\xe6':889,'\xe7':500,'\xe8':556,'\xe9':556,'\xea':556,'\xeb':556,'\xec':278,'\xed':278,'\xee':278,'\xef':278,'\xf0':556,'\xf1':556,
'\xf2':556,'\xf3':556,'\xf4':556,'\xf5':556,'\xf6':556,'\xf7':584,'\xf8':611,'\xf9':556,'\xfa':556,'\xfb':556,'\xfc':556,'\xfd':500,'\xfe':556,'\xff':500}
fpdf_charwidths['helveticaB']={
'\x00':278,'\x01':278,'\x02':278,'\x03':278,'\x04':278,'\x05':278,'\x06':278,'\x07':278,'\x08':278,'\t':278,'\n':278,'\x0b':278,'\x0c':278,'\r':278,'\x0e':278,'\x0f':278,'\x10':278,'\x11':278,'\x12':278,'\x13':278,'\x14':278,'\x15':278,
'\x16':278,'\x17':278,'\x18':278,'\x19':278,'\x1a':278,'\x1b':278,'\x1c':278,'\x1d':278,'\x1e':278,'\x1f':278,' ':278,'!':333,'"':474,'#':556,'$':556,'%':889,'&':722,'\'':238,'(':333,')':333,'*':389,'+':584,
',':278,'-':333,'.':278,'/':278,'0':556,'1':556,'2':556,'3':556,'4':556,'5':556,'6':556,'7':556,'8':556,'9':556,':':333,';':333,'<':584,'=':584,'>':584,'?':611,'@':975,'A':722,
'B':722,'C':722,'D':722,'E':667,'F':611,'G':778,'H':722,'I':278,'J':556,'K':722,'L':611,'M':833,'N':722,'O':778,'P':667,'Q':778,'R':722,'S':667,'T':611,'U':722,'V':667,'W':944,
'X':667,'Y':667,'Z':611,'[':333,'\\':278,']':333,'^':584,'_':556,'`':333,'a':556,'b':611,'c':556,'d':611,'e':556,'f':333,'g':611,'h':611,'i':278,'j':278,'k':556,'l':278,'m':889,
'n':611,'o':611,'p':611,'q':611,'r':389,'s':556,'t':333,'u':611,'v':556,'w':778,'x':556,'y':556,'z':500,'{':389,'|':280,'}':389,'~':584,'\x7f':350,'\x80':556,'\x81':350,'\x82':278,'\x83':556,
'\x84':500,'\x85':1000,'\x86':556,'\x87':556,'\x88':333,'\x89':1000,'\x8a':667,'\x8b':333,'\x8c':1000,'\x8d':350,'\x8e':611,'\x8f':350,'\x90':350,'\x91':278,'\x92':278,'\x93':500,'\x94':500,'\x95':350,'\x96':556,'\x97':1000,'\x98':333,'\x99':1000,
'\x9a':556,'\x9b':333,'\x9c':944,'\x9d':350,'\x9e':500,'\x9f':667,'\xa0':278,'\xa1':333,'\xa2':556,'\xa3':556,'\xa4':556,'\xa5':556,'\xa6':280,'\xa7':556,'\xa8':333,'\xa9':737,'\xaa':370,'\xab':556,'\xac':584,'\xad':333,'\xae':737,'\xaf':333,
'\xb0':400,'\xb1':584,'\xb2':333,'\xb3':333,'\xb4':333,'\xb5':611,'\xb6':556,'\xb7':278,'\xb8':333,'\xb9':333,'\xba':365,'\xbb':556,'\xbc':834,'\xbd':834,'\xbe':834,'\xbf':611,'\xc0':722,'\xc1':722,'\xc2':722,'\xc3':722,'\xc4':722,'\xc5':722,
'\xc6':1000,'\xc7':722,'\xc8':667,'\xc9':667,'\xca':667,'\xcb':667,'\xcc':278,'\xcd':278,'\xce':278,'\xcf':278,'\xd0':722,'\xd1':722,'\xd2':778,'\xd3':778,'\xd4':778,'\xd5':778,'\xd6':778,'\xd7':584,'\xd8':778,'\xd9':722,'\xda':722,'\xdb':722,
'\xdc':722,'\xdd':667,'\xde':667,'\xdf':611,'\xe0':556,'\xe1':556,'\xe2':556,'\xe3':556,'\xe4':556,'\xe5':556,'\xe6':889,'\xe7':556,'\xe8':556,'\xe9':556,'\xea':556,'\xeb':556,'\xec':278,'\xed':278,'\xee':278,'\xef':278,'\xf0':611,'\xf1':611,
'\xf2':611,'\xf3':611,'\xf4':611,'\xf5':611,'\xf6':611,'\xf7':584,'\xf8':611,'\xf9':611,'\xfa':611,'\xfb':611,'\xfc':611,'\xfd':556,'\xfe':611,'\xff':556
}
fpdf_charwidths['helveticaBI']={
'\x00':278,'\x01':278,'\x02':278,'\x03':278,'\x04':278,'\x05':278,'\x06':278,'\x07':278,'\x08':278,'\t':278,'\n':278,'\x0b':278,'\x0c':278,'\r':278,'\x0e':278,'\x0f':278,'\x10':278,'\x11':278,'\x12':278,'\x13':278,'\x14':278,'\x15':278,
'\x16':278,'\x17':278,'\x18':278,'\x19':278,'\x1a':278,'\x1b':278,'\x1c':278,'\x1d':278,'\x1e':278,'\x1f':278,' ':278,'!':333,'"':474,'#':556,'$':556,'%':889,'&':722,'\'':238,'(':333,')':333,'*':389,'+':584,
',':278,'-':333,'.':278,'/':278,'0':556,'1':556,'2':556,'3':556,'4':556,'5':556,'6':556,'7':556,'8':556,'9':556,':':333,';':333,'<':584,'=':584,'>':584,'?':611,'@':975,'A':722,
'B':722,'C':722,'D':722,'E':667,'F':611,'G':778,'H':722,'I':278,'J':556,'K':722,'L':611,'M':833,'N':722,'O':778,'P':667,'Q':778,'R':722,'S':667,'T':611,'U':722,'V':667,'W':944,
'X':667,'Y':667,'Z':611,'[':333,'\\':278,']':333,'^':584,'_':556,'`':333,'a':556,'b':611,'c':556,'d':611,'e':556,'f':333,'g':611,'h':611,'i':278,'j':278,'k':556,'l':278,'m':889,
'n':611,'o':611,'p':611,'q':611,'r':389,'s':556,'t':333,'u':611,'v':556,'w':778,'x':556,'y':556,'z':500,'{':389,'|':280,'}':389,'~':584,'\x7f':350,'\x80':556,'\x81':350,'\x82':278,'\x83':556,
'\x84':500,'\x85':1000,'\x86':556,'\x87':556,'\x88':333,'\x89':1000,'\x8a':667,'\x8b':333,'\x8c':1000,'\x8d':350,'\x8e':611,'\x8f':350,'\x90':350,'\x91':278,'\x92':278,'\x93':500,'\x94':500,'\x95':350,'\x96':556,'\x97':1000,'\x98':333,'\x99':1000,
'\x9a':556,'\x9b':333,'\x9c':944,'\x9d':350,'\x9e':500,'\x9f':667,'\xa0':278,'\xa1':333,'\xa2':556,'\xa3':556,'\xa4':556,'\xa5':556,'\xa6':280,'\xa7':556,'\xa8':333,'\xa9':737,'\xaa':370,'\xab':556,'\xac':584,'\xad':333,'\xae':737,'\xaf':333,
'\xb0':400,'\xb1':584,'\xb2':333,'\xb3':333,'\xb4':333,'\xb5':611,'\xb6':556,'\xb7':278,'\xb8':333,'\xb9':333,'\xba':365,'\xbb':556,'\xbc':834,'\xbd':834,'\xbe':834,'\xbf':611,'\xc0':722,'\xc1':722,'\xc2':722,'\xc3':722,'\xc4':722,'\xc5':722,
'\xc6':1000,'\xc7':722,'\xc8':667,'\xc9':667,'\xca':667,'\xcb':667,'\xcc':278,'\xcd':278,'\xce':278,'\xcf':278,'\xd0':722,'\xd1':722,'\xd2':778,'\xd3':778,'\xd4':778,'\xd5':778,'\xd6':778,'\xd7':584,'\xd8':778,'\xd9':722,'\xda':722,'\xdb':722,
'\xdc':722,'\xdd':667,'\xde':667,'\xdf':611,'\xe0':556,'\xe1':556,'\xe2':556,'\xe3':556,'\xe4':556,'\xe5':556,'\xe6':889,'\xe7':556,'\xe8':556,'\xe9':556,'\xea':556,'\xeb':556,'\xec':278,'\xed':278,'\xee':278,'\xef':278,'\xf0':611,'\xf1':611,
'\xf2':611,'\xf3':611,'\xf4':611,'\xf5':611,'\xf6':611,'\xf7':584,'\xf8':611,'\xf9':611,'\xfa':611,'\xfb':611,'\xfc':611,'\xfd':556,'\xfe':611,'\xff':556}
fpdf_charwidths['helveticaI']={
'\x00':278,'\x01':278,'\x02':278,'\x03':278,'\x04':278,'\x05':278,'\x06':278,'\x07':278,'\x08':278,'\t':278,'\n':278,'\x0b':278,'\x0c':278,'\r':278,'\x0e':278,'\x0f':278,'\x10':278,'\x11':278,'\x12':278,'\x13':278,'\x14':278,'\x15':278,
'\x16':278,'\x17':278,'\x18':278,'\x19':278,'\x1a':278,'\x1b':278,'\x1c':278,'\x1d':278,'\x1e':278,'\x1f':278,' ':278,'!':278,'"':355,'#':556,'$':556,'%':889,'&':667,'\'':191,'(':333,')':333,'*':389,'+':584,
',':278,'-':333,'.':278,'/':278,'0':556,'1':556,'2':556,'3':556,'4':556,'5':556,'6':556,'7':556,'8':556,'9':556,':':278,';':278,'<':584,'=':584,'>':584,'?':556,'@':1015,'A':667,
'B':667,'C':722,'D':722,'E':667,'F':611,'G':778,'H':722,'I':278,'J':500,'K':667,'L':556,'M':833,'N':722,'O':778,'P':667,'Q':778,'R':722,'S':667,'T':611,'U':722,'V':667,'W':944,
'X':667,'Y':667,'Z':611,'[':278,'\\':278,']':278,'^':469,'_':556,'`':333,'a':556,'b':556,'c':500,'d':556,'e':556,'f':278,'g':556,'h':556,'i':222,'j':222,'k':500,'l':222,'m':833,
'n':556,'o':556,'p':556,'q':556,'r':333,'s':500,'t':278,'u':556,'v':500,'w':722,'x':500,'y':500,'z':500,'{':334,'|':260,'}':334,'~':584,'\x7f':350,'\x80':556,'\x81':350,'\x82':222,'\x83':556,
'\x84':333,'\x85':1000,'\x86':556,'\x87':556,'\x88':333,'\x89':1000,'\x8a':667,'\x8b':333,'\x8c':1000,'\x8d':350,'\x8e':611,'\x8f':350,'\x90':350,'\x91':222,'\x92':222,'\x93':333,'\x94':333,'\x95':350,'\x96':556,'\x97':1000,'\x98':333,'\x99':1000,
'\x9a':500,'\x9b':333,'\x9c':944,'\x9d':350,'\x9e':500,'\x9f':667,'\xa0':278,'\xa1':333,'\xa2':556,'\xa3':556,'\xa4':556,'\xa5':556,'\xa6':260,'\xa7':556,'\xa8':333,'\xa9':737,'\xaa':370,'\xab':556,'\xac':584,'\xad':333,'\xae':737,'\xaf':333,
'\xb0':400,'\xb1':584,'\xb2':333,'\xb3':333,'\xb4':333,'\xb5':556,'\xb6':537,'\xb7':278,'\xb8':333,'\xb9':333,'\xba':365,'\xbb':556,'\xbc':834,'\xbd':834,'\xbe':834,'\xbf':611,'\xc0':667,'\xc1':667,'\xc2':667,'\xc3':667,'\xc4':667,'\xc5':667,
'\xc6':1000,'\xc7':722,'\xc8':667,'\xc9':667,'\xca':667,'\xcb':667,'\xcc':278,'\xcd':278,'\xce':278,'\xcf':278,'\xd0':722,'\xd1':722,'\xd2':778,'\xd3':778,'\xd4':778,'\xd5':778,'\xd6':778,'\xd7':584,'\xd8':778,'\xd9':722,'\xda':722,'\xdb':722,
'\xdc':722,'\xdd':667,'\xde':667,'\xdf':611,'\xe0':556,'\xe1':556,'\xe2':556,'\xe3':556,'\xe4':556,'\xe5':556,'\xe6':889,'\xe7':500,'\xe8':556,'\xe9':556,'\xea':556,'\xeb':556,'\xec':278,'\xed':278,'\xee':278,'\xef':278,'\xf0':556,'\xf1':556,
'\xf2':556,'\xf3':556,'\xf4':556,'\xf5':556,'\xf6':556,'\xf7':584,'\xf8':611,'\xf9':556,'\xfa':556,'\xfb':556,'\xfc':556,'\xfd':500,'\xfe':556,'\xff':500}
fpdf_charwidths['symbol']={
'\x00':250,'\x01':250,'\x02':250,'\x03':250,'\x04':250,'\x05':250,'\x06':250,'\x07':250,'\x08':250,'\t':250,'\n':250,'\x0b':250,'\x0c':250,'\r':250,'\x0e':250,'\x0f':250,'\x10':250,'\x11':250,'\x12':250,'\x13':250,'\x14':250,'\x15':250,
'\x16':250,'\x17':250,'\x18':250,'\x19':250,'\x1a':250,'\x1b':250,'\x1c':250,'\x1d':250,'\x1e':250,'\x1f':250,' ':250,'!':333,'"':713,'#':500,'$':549,'%':833,'&':778,'\'':439,'(':333,')':333,'*':500,'+':549,
',':250,'-':549,'.':250,'/':278,'0':500,'1':500,'2':500,'3':500,'4':500,'5':500,'6':500,'7':500,'8':500,'9':500,':':278,';':278,'<':549,'=':549,'>':549,'?':444,'@':549,'A':722,
'B':667,'C':722,'D':612,'E':611,'F':763,'G':603,'H':722,'I':333,'J':631,'K':722,'L':686,'M':889,'N':722,'O':722,'P':768,'Q':741,'R':556,'S':592,'T':611,'U':690,'V':439,'W':768,
'X':645,'Y':795,'Z':611,'[':333,'\\':863,']':333,'^':658,'_':500,'`':500,'a':631,'b':549,'c':549,'d':494,'e':439,'f':521,'g':411,'h':603,'i':329,'j':603,'k':549,'l':549,'m':576,
'n':521,'o':549,'p':549,'q':521,'r':549,'s':603,'t':439,'u':576,'v':713,'w':686,'x':493,'y':686,'z':494,'{':480,'|':200,'}':480,'~':549,'\x7f':0,'\x80':0,'\x81':0,'\x82':0,'\x83':0,
'\x84':0,'\x85':0,'\x86':0,'\x87':0,'\x88':0,'\x89':0,'\x8a':0,'\x8b':0,'\x8c':0,'\x8d':0,'\x8e':0,'\x8f':0,'\x90':0,'\x91':0,'\x92':0,'\x93':0,'\x94':0,'\x95':0,'\x96':0,'\x97':0,'\x98':0,'\x99':0,
'\x9a':0,'\x9b':0,'\x9c':0,'\x9d':0,'\x9e':0,'\x9f':0,'\xa0':750,'\xa1':620,'\xa2':247,'\xa3':549,'\xa4':167,'\xa5':713,'\xa6':500,'\xa7':753,'\xa8':753,'\xa9':753,'\xaa':753,'\xab':1042,'\xac':987,'\xad':603,'\xae':987,'\xaf':603,
'\xb0':400,'\xb1':549,'\xb2':411,'\xb3':549,'\xb4':549,'\xb5':713,'\xb6':494,'\xb7':460,'\xb8':549,'\xb9':549,'\xba':549,'\xbb':549,'\xbc':1000,'\xbd':603,'\xbe':1000,'\xbf':658,'\xc0':823,'\xc1':686,'\xc2':795,'\xc3':987,'\xc4':768,'\xc5':768,
'\xc6':823,'\xc7':768,'\xc8':768,'\xc9':713,'\xca':713,'\xcb':713,'\xcc':713,'\xcd':713,'\xce':713,'\xcf':713,'\xd0':768,'\xd1':713,'\xd2':790,'\xd3':790,'\xd4':890,'\xd5':823,'\xd6':549,'\xd7':250,'\xd8':713,'\xd9':603,'\xda':603,'\xdb':1042,
'\xdc':987,'\xdd':603,'\xde':987,'\xdf':603,'\xe0':494,'\xe1':329,'\xe2':790,'\xe3':790,'\xe4':786,'\xe5':713,'\xe6':384,'\xe7':384,'\xe8':384,'\xe9':384,'\xea':384,'\xeb':384,'\xec':494,'\xed':494,'\xee':494,'\xef':494,'\xf0':0,'\xf1':329,
'\xf2':274,'\xf3':686,'\xf4':686,'\xf5':686,'\xf6':384,'\xf7':384,'\xf8':384,'\xf9':384,'\xfa':384,'\xfb':384,'\xfc':494,'\xfd':494,'\xfe':494,'\xff':0}
fpdf_charwidths['times']={
'\x00':250,'\x01':250,'\x02':250,'\x03':250,'\x04':250,'\x05':250,'\x06':250,'\x07':250,'\x08':250,'\t':250,'\n':250,'\x0b':250,'\x0c':250,'\r':250,'\x0e':250,'\x0f':250,'\x10':250,'\x11':250,'\x12':250,'\x13':250,'\x14':250,'\x15':250,
'\x16':250,'\x17':250,'\x18':250,'\x19':250,'\x1a':250,'\x1b':250,'\x1c':250,'\x1d':250,'\x1e':250,'\x1f':250,' ':250,'!':333,'"':408,'#':500,'$':500,'%':833,'&':778,'\'':180,'(':333,')':333,'*':500,'+':564,
',':250,'-':333,'.':250,'/':278,'0':500,'1':500,'2':500,'3':500,'4':500,'5':500,'6':500,'7':500,'8':500,'9':500,':':278,';':278,'<':564,'=':564,'>':564,'?':444,'@':921,'A':722,
'B':667,'C':667,'D':722,'E':611,'F':556,'G':722,'H':722,'I':333,'J':389,'K':722,'L':611,'M':889,'N':722,'O':722,'P':556,'Q':722,'R':667,'S':556,'T':611,'U':722,'V':722,'W':944,
'X':722,'Y':722,'Z':611,'[':333,'\\':278,']':333,'^':469,'_':500,'`':333,'a':444,'b':500,'c':444,'d':500,'e':444,'f':333,'g':500,'h':500,'i':278,'j':278,'k':500,'l':278,'m':778,
'n':500,'o':500,'p':500,'q':500,'r':333,'s':389,'t':278,'u':500,'v':500,'w':722,'x':500,'y':500,'z':444,'{':480,'|':200,'}':480,'~':541,'\x7f':350,'\x80':500,'\x81':350,'\x82':333,'\x83':500,
'\x84':444,'\x85':1000,'\x86':500,'\x87':500,'\x88':333,'\x89':1000,'\x8a':556,'\x8b':333,'\x8c':889,'\x8d':350,'\x8e':611,'\x8f':350,'\x90':350,'\x91':333,'\x92':333,'\x93':444,'\x94':444,'\x95':350,'\x96':500,'\x97':1000,'\x98':333,'\x99':980,
'\x9a':389,'\x9b':333,'\x9c':722,'\x9d':350,'\x9e':444,'\x9f':722,'\xa0':250,'\xa1':333,'\xa2':500,'\xa3':500,'\xa4':500,'\xa5':500,'\xa6':200,'\xa7':500,'\xa8':333,'\xa9':760,'\xaa':276,'\xab':500,'\xac':564,'\xad':333,'\xae':760,'\xaf':333,
'\xb0':400,'\xb1':564,'\xb2':300,'\xb3':300,'\xb4':333,'\xb5':500,'\xb6':453,'\xb7':250,'\xb8':333,'\xb9':300,'\xba':310,'\xbb':500,'\xbc':750,'\xbd':750,'\xbe':750,'\xbf':444,'\xc0':722,'\xc1':722,'\xc2':722,'\xc3':722,'\xc4':722,'\xc5':722,
'\xc6':889,'\xc7':667,'\xc8':611,'\xc9':611,'\xca':611,'\xcb':611,'\xcc':333,'\xcd':333,'\xce':333,'\xcf':333,'\xd0':722,'\xd1':722,'\xd2':722,'\xd3':722,'\xd4':722,'\xd5':722,'\xd6':722,'\xd7':564,'\xd8':722,'\xd9':722,'\xda':722,'\xdb':722,
'\xdc':722,'\xdd':722,'\xde':556,'\xdf':500,'\xe0':444,'\xe1':444,'\xe2':444,'\xe3':444,'\xe4':444,'\xe5':444,'\xe6':667,'\xe7':444,'\xe8':444,'\xe9':444,'\xea':444,'\xeb':444,'\xec':278,'\xed':278,'\xee':278,'\xef':278,'\xf0':500,'\xf1':500,
'\xf2':500,'\xf3':500,'\xf4':500,'\xf5':500,'\xf6':500,'\xf7':564,'\xf8':500,'\xf9':500,'\xfa':500,'\xfb':500,'\xfc':500,'\xfd':500,'\xfe':500,'\xff':500}
fpdf_charwidths['timesB']={
'\x00':250,'\x01':250,'\x02':250,'\x03':250,'\x04':250,'\x05':250,'\x06':250,'\x07':250,'\x08':250,'\t':250,'\n':250,'\x0b':250,'\x0c':250,'\r':250,'\x0e':250,'\x0f':250,'\x10':250,'\x11':250,'\x12':250,'\x13':250,'\x14':250,'\x15':250,
'\x16':250,'\x17':250,'\x18':250,'\x19':250,'\x1a':250,'\x1b':250,'\x1c':250,'\x1d':250,'\x1e':250,'\x1f':250,' ':250,'!':333,'"':555,'#':500,'$':500,'%':1000,'&':833,'\'':278,'(':333,')':333,'*':500,'+':570,
',':250,'-':333,'.':250,'/':278,'0':500,'1':500,'2':500,'3':500,'4':500,'5':500,'6':500,'7':500,'8':500,'9':500,':':333,';':333,'<':570,'=':570,'>':570,'?':500,'@':930,'A':722,
'B':667,'C':722,'D':722,'E':667,'F':611,'G':778,'H':778,'I':389,'J':500,'K':778,'L':667,'M':944,'N':722,'O':778,'P':611,'Q':778,'R':722,'S':556,'T':667,'U':722,'V':722,'W':1000,
'X':722,'Y':722,'Z':667,'[':333,'\\':278,']':333,'^':581,'_':500,'`':333,'a':500,'b':556,'c':444,'d':556,'e':444,'f':333,'g':500,'h':556,'i':278,'j':333,'k':556,'l':278,'m':833,
'n':556,'o':500,'p':556,'q':556,'r':444,'s':389,'t':333,'u':556,'v':500,'w':722,'x':500,'y':500,'z':444,'{':394,'|':220,'}':394,'~':520,'\x7f':350,'\x80':500,'\x81':350,'\x82':333,'\x83':500,
'\x84':500,'\x85':1000,'\x86':500,'\x87':500,'\x88':333,'\x89':1000,'\x8a':556,'\x8b':333,'\x8c':1000,'\x8d':350,'\x8e':667,'\x8f':350,'\x90':350,'\x91':333,'\x92':333,'\x93':500,'\x94':500,'\x95':350,'\x96':500,'\x97':1000,'\x98':333,'\x99':1000,
'\x9a':389,'\x9b':333,'\x9c':722,'\x9d':350,'\x9e':444,'\x9f':722,'\xa0':250,'\xa1':333,'\xa2':500,'\xa3':500,'\xa4':500,'\xa5':500,'\xa6':220,'\xa7':500,'\xa8':333,'\xa9':747,'\xaa':300,'\xab':500,'\xac':570,'\xad':333,'\xae':747,'\xaf':333,
'\xb0':400,'\xb1':570,'\xb2':300,'\xb3':300,'\xb4':333,'\xb5':556,'\xb6':540,'\xb7':250,'\xb8':333,'\xb9':300,'\xba':330,'\xbb':500,'\xbc':750,'\xbd':750,'\xbe':750,'\xbf':500,'\xc0':722,'\xc1':722,'\xc2':722,'\xc3':722,'\xc4':722,'\xc5':722,
'\xc6':1000,'\xc7':722,'\xc8':667,'\xc9':667,'\xca':667,'\xcb':667,'\xcc':389,'\xcd':389,'\xce':389,'\xcf':389,'\xd0':722,'\xd1':722,'\xd2':778,'\xd3':778,'\xd4':778,'\xd5':778,'\xd6':778,'\xd7':570,'\xd8':778,'\xd9':722,'\xda':722,'\xdb':722,
'\xdc':722,'\xdd':722,'\xde':611,'\xdf':556,'\xe0':500,'\xe1':500,'\xe2':500,'\xe3':500,'\xe4':500,'\xe5':500,'\xe6':722,'\xe7':444,'\xe8':444,'\xe9':444,'\xea':444,'\xeb':444,'\xec':278,'\xed':278,'\xee':278,'\xef':278,'\xf0':500,'\xf1':556,
'\xf2':500,'\xf3':500,'\xf4':500,'\xf5':500,'\xf6':500,'\xf7':570,'\xf8':500,'\xf9':556,'\xfa':556,'\xfb':556,'\xfc':556,'\xfd':500,'\xfe':556,'\xff':500}
fpdf_charwidths['timesBI']={
'\x00':250,'\x01':250,'\x02':250,'\x03':250,'\x04':250,'\x05':250,'\x06':250,'\x07':250,'\x08':250,'\t':250,'\n':250,'\x0b':250,'\x0c':250,'\r':250,'\x0e':250,'\x0f':250,'\x10':250,'\x11':250,'\x12':250,'\x13':250,'\x14':250,'\x15':250,
'\x16':250,'\x17':250,'\x18':250,'\x19':250,'\x1a':250,'\x1b':250,'\x1c':250,'\x1d':250,'\x1e':250,'\x1f':250,' ':250,'!':389,'"':555,'#':500,'$':500,'%':833,'&':778,'\'':278,'(':333,')':333,'*':500,'+':570,
',':250,'-':333,'.':250,'/':278,'0':500,'1':500,'2':500,'3':500,'4':500,'5':500,'6':500,'7':500,'8':500,'9':500,':':333,';':333,'<':570,'=':570,'>':570,'?':500,'@':832,'A':667,
'B':667,'C':667,'D':722,'E':667,'F':667,'G':722,'H':778,'I':389,'J':500,'K':667,'L':611,'M':889,'N':722,'O':722,'P':611,'Q':722,'R':667,'S':556,'T':611,'U':722,'V':667,'W':889,
'X':667,'Y':611,'Z':611,'[':333,'\\':278,']':333,'^':570,'_':500,'`':333,'a':500,'b':500,'c':444,'d':500,'e':444,'f':333,'g':500,'h':556,'i':278,'j':278,'k':500,'l':278,'m':778,
'n':556,'o':500,'p':500,'q':500,'r':389,'s':389,'t':278,'u':556,'v':444,'w':667,'x':500,'y':444,'z':389,'{':348,'|':220,'}':348,'~':570,'\x7f':350,'\x80':500,'\x81':350,'\x82':333,'\x83':500,
'\x84':500,'\x85':1000,'\x86':500,'\x87':500,'\x88':333,'\x89':1000,'\x8a':556,'\x8b':333,'\x8c':944,'\x8d':350,'\x8e':611,'\x8f':350,'\x90':350,'\x91':333,'\x92':333,'\x93':500,'\x94':500,'\x95':350,'\x96':500,'\x97':1000,'\x98':333,'\x99':1000,
'\x9a':389,'\x9b':333,'\x9c':722,'\x9d':350,'\x9e':389,'\x9f':611,'\xa0':250,'\xa1':389,'\xa2':500,'\xa3':500,'\xa4':500,'\xa5':500,'\xa6':220,'\xa7':500,'\xa8':333,'\xa9':747,'\xaa':266,'\xab':500,'\xac':606,'\xad':333,'\xae':747,'\xaf':333,
'\xb0':400,'\xb1':570,'\xb2':300,'\xb3':300,'\xb4':333,'\xb5':576,'\xb6':500,'\xb7':250,'\xb8':333,'\xb9':300,'\xba':300,'\xbb':500,'\xbc':750,'\xbd':750,'\xbe':750,'\xbf':500,'\xc0':667,'\xc1':667,'\xc2':667,'\xc3':667,'\xc4':667,'\xc5':667,
'\xc6':944,'\xc7':667,'\xc8':667,'\xc9':667,'\xca':667,'\xcb':667,'\xcc':389,'\xcd':389,'\xce':389,'\xcf':389,'\xd0':722,'\xd1':722,'\xd2':722,'\xd3':722,'\xd4':722,'\xd5':722,'\xd6':722,'\xd7':570,'\xd8':722,'\xd9':722,'\xda':722,'\xdb':722,
'\xdc':722,'\xdd':611,'\xde':611,'\xdf':500,'\xe0':500,'\xe1':500,'\xe2':500,'\xe3':500,'\xe4':500,'\xe5':500,'\xe6':722,'\xe7':444,'\xe8':444,'\xe9':444,'\xea':444,'\xeb':444,'\xec':278,'\xed':278,'\xee':278,'\xef':278,'\xf0':500,'\xf1':556,
'\xf2':500,'\xf3':500,'\xf4':500,'\xf5':500,'\xf6':500,'\xf7':570,'\xf8':500,'\xf9':556,'\xfa':556,'\xfb':556,'\xfc':556,'\xfd':444,'\xfe':500,'\xff':444}
fpdf_charwidths['timesI']={
'\x00':250,'\x01':250,'\x02':250,'\x03':250,'\x04':250,'\x05':250,'\x06':250,'\x07':250,'\x08':250,'\t':250,'\n':250,'\x0b':250,'\x0c':250,'\r':250,'\x0e':250,'\x0f':250,'\x10':250,'\x11':250,'\x12':250,'\x13':250,'\x14':250,'\x15':250,
'\x16':250,'\x17':250,'\x18':250,'\x19':250,'\x1a':250,'\x1b':250,'\x1c':250,'\x1d':250,'\x1e':250,'\x1f':250,' ':250,'!':333,'"':420,'#':500,'$':500,'%':833,'&':778,'\'':214,'(':333,')':333,'*':500,'+':675,
',':250,'-':333,'.':250,'/':278,'0':500,'1':500,'2':500,'3':500,'4':500,'5':500,'6':500,'7':500,'8':500,'9':500,':':333,';':333,'<':675,'=':675,'>':675,'?':500,'@':920,'A':611,
'B':611,'C':667,'D':722,'E':611,'F':611,'G':722,'H':722,'I':333,'J':444,'K':667,'L':556,'M':833,'N':667,'O':722,'P':611,'Q':722,'R':611,'S':500,'T':556,'U':722,'V':611,'W':833,
'X':611,'Y':556,'Z':556,'[':389,'\\':278,']':389,'^':422,'_':500,'`':333,'a':500,'b':500,'c':444,'d':500,'e':444,'f':278,'g':500,'h':500,'i':278,'j':278,'k':444,'l':278,'m':722,
'n':500,'o':500,'p':500,'q':500,'r':389,'s':389,'t':278,'u':500,'v':444,'w':667,'x':444,'y':444,'z':389,'{':400,'|':275,'}':400,'~':541,'\x7f':350,'\x80':500,'\x81':350,'\x82':333,'\x83':500,
'\x84':556,'\x85':889,'\x86':500,'\x87':500,'\x88':333,'\x89':1000,'\x8a':500,'\x8b':333,'\x8c':944,'\x8d':350,'\x8e':556,'\x8f':350,'\x90':350,'\x91':333,'\x92':333,'\x93':556,'\x94':556,'\x95':350,'\x96':500,'\x97':889,'\x98':333,'\x99':980,
'\x9a':389,'\x9b':333,'\x9c':667,'\x9d':350,'\x9e':389,'\x9f':556,'\xa0':250,'\xa1':389,'\xa2':500,'\xa3':500,'\xa4':500,'\xa5':500,'\xa6':275,'\xa7':500,'\xa8':333,'\xa9':760,'\xaa':276,'\xab':500,'\xac':675,'\xad':333,'\xae':760,'\xaf':333,
'\xb0':400,'\xb1':675,'\xb2':300,'\xb3':300,'\xb4':333,'\xb5':500,'\xb6':523,'\xb7':250,'\xb8':333,'\xb9':300,'\xba':310,'\xbb':500,'\xbc':750,'\xbd':750,'\xbe':750,'\xbf':500,'\xc0':611,'\xc1':611,'\xc2':611,'\xc3':611,'\xc4':611,'\xc5':611,
'\xc6':889,'\xc7':667,'\xc8':611,'\xc9':611,'\xca':611,'\xcb':611,'\xcc':333,'\xcd':333,'\xce':333,'\xcf':333,'\xd0':722,'\xd1':667,'\xd2':722,'\xd3':722,'\xd4':722,'\xd5':722,'\xd6':722,'\xd7':675,'\xd8':722,'\xd9':722,'\xda':722,'\xdb':722,
'\xdc':722,'\xdd':556,'\xde':611,'\xdf':500,'\xe0':500,'\xe1':500,'\xe2':500,'\xe3':500,'\xe4':500,'\xe5':500,'\xe6':667,'\xe7':444,'\xe8':444,'\xe9':444,'\xea':444,'\xeb':444,'\xec':278,'\xed':278,'\xee':278,'\xef':278,'\xf0':500,'\xf1':500,
'\xf2':500,'\xf3':500,'\xf4':500,'\xf5':500,'\xf6':500,'\xf7':675,'\xf8':500,'\xf9':500,'\xfa':500,'\xfb':500,'\xfc':500,'\xfd':444,'\xfe':500,'\xff':444}
fpdf_charwidths['zapfdingbats']={
'\x00':0,'\x01':0,'\x02':0,'\x03':0,'\x04':0,'\x05':0,'\x06':0,'\x07':0,'\x08':0,'\t':0,'\n':0,'\x0b':0,'\x0c':0,'\r':0,'\x0e':0,'\x0f':0,'\x10':0,'\x11':0,'\x12':0,'\x13':0,'\x14':0,'\x15':0,
'\x16':0,'\x17':0,'\x18':0,'\x19':0,'\x1a':0,'\x1b':0,'\x1c':0,'\x1d':0,'\x1e':0,'\x1f':0,' ':278,'!':974,'"':961,'#':974,'$':980,'%':719,'&':789,'\'':790,'(':791,')':690,'*':960,'+':939,
',':549,'-':855,'.':911,'/':933,'0':911,'1':945,'2':974,'3':755,'4':846,'5':762,'6':761,'7':571,'8':677,'9':763,':':760,';':759,'<':754,'=':494,'>':552,'?':537,'@':577,'A':692,
'B':786,'C':788,'D':788,'E':790,'F':793,'G':794,'H':816,'I':823,'J':789,'K':841,'L':823,'M':833,'N':816,'O':831,'P':923,'Q':744,'R':723,'S':749,'T':790,'U':792,'V':695,'W':776,
'X':768,'Y':792,'Z':759,'[':707,'\\':708,']':682,'^':701,'_':826,'`':815,'a':789,'b':789,'c':707,'d':687,'e':696,'f':689,'g':786,'h':787,'i':713,'j':791,'k':785,'l':791,'m':873,
'n':761,'o':762,'p':762,'q':759,'r':759,'s':892,'t':892,'u':788,'v':784,'w':438,'x':138,'y':277,'z':415,'{':392,'|':392,'}':668,'~':668,'\x7f':0,'\x80':390,'\x81':390,'\x82':317,'\x83':317,
'\x84':276,'\x85':276,'\x86':509,'\x87':509,'\x88':410,'\x89':410,'\x8a':234,'\x8b':234,'\x8c':334,'\x8d':334,'\x8e':0,'\x8f':0,'\x90':0,'\x91':0,'\x92':0,'\x93':0,'\x94':0,'\x95':0,'\x96':0,'\x97':0,'\x98':0,'\x99':0,
'\x9a':0,'\x9b':0,'\x9c':0,'\x9d':0,'\x9e':0,'\x9f':0,'\xa0':0,'\xa1':732,'\xa2':544,'\xa3':544,'\xa4':910,'\xa5':667,'\xa6':760,'\xa7':760,'\xa8':776,'\xa9':595,'\xaa':694,'\xab':626,'\xac':788,'\xad':788,'\xae':788,'\xaf':788,
'\xb0':788,'\xb1':788,'\xb2':788,'\xb3':788,'\xb4':788,'\xb5':788,'\xb6':788,'\xb7':788,'\xb8':788,'\xb9':788,'\xba':788,'\xbb':788,'\xbc':788,'\xbd':788,'\xbe':788,'\xbf':788,'\xc0':788,'\xc1':788,'\xc2':788,'\xc3':788,'\xc4':788,'\xc5':788,
'\xc6':788,'\xc7':788,'\xc8':788,'\xc9':788,'\xca':788,'\xcb':788,'\xcc':788,'\xcd':788,'\xce':788,'\xcf':788,'\xd0':788,'\xd1':788,'\xd2':788,'\xd3':788,'\xd4':894,'\xd5':838,'\xd6':1016,'\xd7':458,'\xd8':748,'\xd9':924,'\xda':748,'\xdb':918,
'\xdc':927,'\xdd':928,'\xde':928,'\xdf':834,'\xe0':873,'\xe1':828,'\xe2':924,'\xe3':924,'\xe4':917,'\xe5':930,'\xe6':931,'\xe7':463,'\xe8':883,'\xe9':836,'\xea':836,'\xeb':867,'\xec':867,'\xed':696,'\xee':696,'\xef':874,'\xf0':0,'\xf1':874,
'\xf2':760,'\xf3':946,'\xf4':771,'\xf5':865,'\xf6':771,'\xf7':888,'\xf8':967,'\xf9':888,'\xfa':831,'\xfb':873,'\xfc':927,'\xfd':970,'\xfe':918,'\xff':0}
| Python |
#******************************************************************************
# TTFontFile class
#
# This class is based on The ReportLab Open Source PDF library
# written in Python - http://www.reportlab.com/software/opensource/
# together with ideas from the OpenOffice source code and others.
#
# Version: 1.04
# Date: 2011-09-18
# Author: Ian Back <ianb@bpm1.com>
# License: LGPL
# Copyright (c) Ian Back, 2010
# Ported to Python 2.7 by Mariano Reingart (reingart@gmail.com) on 2012
# This header must be retained in any redistribution or
# modification of the file.
#
#******************************************************************************
from struct import pack, unpack, unpack_from
import re
import warnings
from php import die, substr, str_repeat, str_pad, strlen, count
# Define the value used in the "head" table of a created TTF file
# 0x74727565 "true" for Mac
# 0x00010000 for Windows
# Either seems to work for a font embedded in a PDF file
# when read by Adobe Reader on a Windows PC(!)
_TTF_MAC_HEADER = False
# TrueType Font Glyph operators
GF_WORDS = (1 << 0)
GF_SCALE = (1 << 3)
GF_MORE = (1 << 5)
GF_XYSCALE = (1 << 6)
GF_TWOBYTWO = (1 << 7)
def sub32(x, y):
xlo = x[1]
xhi = x[0]
ylo = y[1]
yhi = y[0]
if (ylo > xlo):
xlo += 1 << 16
yhi += 1
reslo = xlo-ylo
if (yhi > xhi):
xhi += 1 << 16
reshi = xhi-yhi
reshi = reshi & 0xFFFF
return (reshi, reslo)
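# sub32 subtracts two 32-bit values kept as (hi16, lo16) pairs, carrying a
# borrow between the halves; e.g. sub32((0, 0), (0, 1)) == (0xFFFF, 0xFFFF),
# i.e. -1 modulo 2**32. The split representation mirrors the PHP original,
# which had no unsigned 32-bit integers.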
def calcChecksum(data):
if (strlen(data) % 4):
data += str_repeat("\0", (4-(len(data) % 4)))
hi=0x0000
lo=0x0000
for i in range(0, len(data), 4):
hi += (ord(data[i])<<8) + ord(data[i+1])
lo += (ord(data[i+2])<<8) + ord(data[i+3])
hi += lo >> 16
lo = lo & 0xFFFF
hi = hi & 0xFFFF
return (hi, lo)
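# The TrueType table checksum is the 32-bit sum of the data read as
# big-endian uint32s, returned as the same (hi, lo) pair of 16-bit halves
# that sub32 uses, e.g. calcChecksum('\x00\x01\x00\x00') == (1, 0).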
class TTFontFile:
def __init__(self):
self.maxStrLenRead = 200000 # Maximum size of glyf table to read in as string (otherwise reads each glyph from file)
def getMetrics(self, file):
self.filename = file
self.fh = open(file,'rb')
self._pos = 0
self.charWidths = []
self.glyphPos = {}
self.charToGlyph = {}
self.tables = {}
self.otables = {}
self.ascent = 0
self.descent = 0
self.TTCFonts = {}
self.version = version = self.read_ulong()
if (version==0x4F54544F):
die("Postscript outlines are not supported")
if (version==0x74746366):
die("ERROR - TrueType Fonts Collections not supported")
if (version not in (0x00010000,0x74727565)):
die("Not a TrueType font: version=" + version)
self.readTableDirectory()
self.extractInfo()
self.fh.close()
def readTableDirectory(self):
self.numTables = self.read_ushort()
self.searchRange = self.read_ushort()
self.entrySelector = self.read_ushort()
self.rangeShift = self.read_ushort()
self.tables = {}
for i in range(self.numTables):
record = {}
record['tag'] = self.read_tag()
record['checksum'] = (self.read_ushort(),self.read_ushort())
record['offset'] = self.read_ulong()
record['length'] = self.read_ulong()
self.tables[record['tag']] = record
def get_table_pos(self, tag):
offset = self.tables[tag]['offset']
length = self.tables[tag]['length']
return (offset, length)
def seek(self, pos):
self._pos = pos
self.fh.seek(self._pos)
def skip(self, delta):
self._pos = self._pos + delta
self.fh.seek(self._pos)
def seek_table(self, tag, offset_in_table = 0):
tpos = self.get_table_pos(tag)
self._pos = tpos[0] + offset_in_table
self.fh.seek(self._pos)
return self._pos
def read_tag(self):
self._pos += 4
return self.fh.read(4)
def read_short(self):
self._pos += 2
s = self.fh.read(2)
a = (ord(s[0])<<8) + ord(s[1])
if (a & (1 << 15) ):
a = (a - (1 << 16))
return a
def unpack_short(self, s):
a = (ord(s[0])<<8) + ord(s[1])
if (a & (1 << 15) ):
a = (a - (1 << 16))
return a
def read_ushort(self):
self._pos += 2
s = self.fh.read(2)
return (ord(s[0])<<8) + ord(s[1])
def read_ulong(self):
self._pos += 4
s = self.fh.read(4)
# Python ints never overflow, so (unlike the PHP original) no negative
# fix-up is needed for large uint32 values
return (ord(s[0])<<24) + (ord(s[1])<<16) + (ord(s[2])<<8) + ord(s[3])
def get_ushort(self, pos):
self.fh.seek(pos)
s = self.fh.read(2)
return (ord(s[0])<<8) + ord(s[1])
def get_ulong(self, pos):
self.fh.seek(pos)
s = self.fh.read(4)
# as in read_ulong: Python ints never overflow, no PHP-style sign fix-up
return (ord(s[0])<<24) + (ord(s[1])<<16) + (ord(s[2])<<8) + ord(s[3])
def pack_short(self, val):
if (val < 0):
val += 1 << 16  # two's complement for negative values
return pack(">H", val & 0xFFFF)
def splice(self, stream, offset, value):
return substr(stream,0,offset) + value + substr(stream,offset+strlen(value))
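# splice overwrites len(value) bytes of stream at offset, e.g.
# splice('abcdef', 2, 'XY') == 'abXYef'; _set_ushort/_set_short use it to
# patch individual fields of a table being rebuilt in memory.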
def _set_ushort(self, stream, offset, value):
up = pack(">H", value)
return self.splice(stream, offset, up)
def _set_short(self, stream, offset, val):
if (val < 0):
val += 1 << 16  # two's complement for negative values
up = pack(">H", val & 0xFFFF)
return self.splice(stream, offset, up)
def get_chunk(self, pos, length):
self.fh.seek(pos)
if (length <1): return ''
return (self.fh.read(length))
def get_table(self, tag):
(pos, length) = self.get_table_pos(tag)
if (length == 0):
die('Truetype font (' + self.filename + '): error reading table: ' + tag)
self.fh.seek(pos)
return (self.fh.read(length))
def add(self, tag, data):
if (tag == 'head') :
data = self.splice(data, 8, "\0\0\0\0")
self.otables[tag] = data
############################################/
############################################/
############################################/
def extractInfo(self):
#################/
# name - Naming table
#################/
self.sFamilyClass = 0
self.sFamilySubClass = 0
name_offset = self.seek_table("name")
format = self.read_ushort()
if (format != 0):
die("Unknown name table format " + format)
numRecords = self.read_ushort()
string_data_offset = name_offset + self.read_ushort()
names = {1:'',2:'',3:'',4:'',6:''}
K = names.keys()
nameCount = len(names)
for i in range(numRecords):
platformId = self.read_ushort()
encodingId = self.read_ushort()
languageId = self.read_ushort()
nameId = self.read_ushort()
length = self.read_ushort()
offset = self.read_ushort()
if (nameId not in K): continue
N = ''
if (platformId == 3 and encodingId == 1 and languageId == 0x409): # Microsoft, Unicode, US English, PS Name
opos = self._pos
self.seek(string_data_offset + offset)
if (length % 2 != 0):
die("PostScript name is UTF-16BE string of odd length")
length /= 2
N = ''
while (length > 0):
char = self.read_ushort()
N += (chr(char))
length -= 1
self._pos = opos
self.seek(opos)
elif (platformId == 1 and encodingId == 0 and languageId == 0): # Macintosh, Roman, English, PS Name
opos = self._pos
N = self.get_chunk(string_data_offset + offset, length)
self._pos = opos
self.seek(opos)
if (N and names[nameId]==''):
names[nameId] = N
nameCount -= 1
if (nameCount==0): break
if (names[6]):
psName = names[6]
elif (names[4]):
psName = re.sub(' ','-',names[4])
elif (names[1]):
psName = re.sub(' ','-',names[1])
else:
psName = ''
if (not psName):
die("Could not find PostScript font name")
self.name = psName
if (names[1]):
self.familyName = names[1]
else:
self.familyName = psName
if (names[2]):
self.styleName = names[2]
else:
self.styleName = 'Regular'
if (names[4]):
self.fullName = names[4]
else:
self.fullName = psName
if (names[3]):
self.uniqueFontID = names[3]
else:
self.uniqueFontID = psName
if (names[6]):
self.fullName = names[6]
#################/
# head - Font header table
#################/
self.seek_table("head")
self.skip(18)
self.unitsPerEm = unitsPerEm = self.read_ushort()
scale = 1000 / float(unitsPerEm)
self.skip(16)
xMin = self.read_short()
yMin = self.read_short()
xMax = self.read_short()
yMax = self.read_short()
self.bbox = [(xMin*scale), (yMin*scale), (xMax*scale), (yMax*scale)]
self.skip(3*2)
indexToLocFormat = self.read_ushort()
glyphDataFormat = self.read_ushort()
if (glyphDataFormat != 0):
die('Unknown glyph data format ' + str(glyphDataFormat))
#################/
# hhea metrics table
#################/
# ttf2t1 seems to use this value rather than the one in OS/2 - so put in for compatibility
if ("hhea" in self.tables):
self.seek_table("hhea")
self.skip(4)
hheaAscender = self.read_short()
hheaDescender = self.read_short()
self.ascent = (hheaAscender *scale)
self.descent = (hheaDescender *scale)
#################/
# OS/2 - OS/2 and Windows metrics table
#################/
if ("OS/2" in self.tables):
self.seek_table("OS/2")
version = self.read_ushort()
self.skip(2)
usWeightClass = self.read_ushort()
self.skip(2)
fsType = self.read_ushort()
if (fsType == 0x0002 or (fsType & 0x0300) != 0):
die('ERROR - Font file ' + self.filename + ' cannot be embedded due to copyright restrictions.')
self.restrictedUse = True
self.skip(20)
sF = self.read_short()
self.sFamilyClass = (sF >> 8)
self.sFamilySubClass = (sF & 0xFF)
self._pos += 10 #PANOSE = 10 byte length
panose = self.fh.read(10)
self.skip(26)
sTypoAscender = self.read_short()
sTypoDescender = self.read_short()
if (not self.ascent):
self.ascent = (sTypoAscender*scale)
if (not self.descent):
self.descent = (sTypoDescender*scale)
if (version > 1):
self.skip(16)
sCapHeight = self.read_short()
self.capHeight = (sCapHeight*scale)
else:
self.capHeight = self.ascent
else:
usWeightClass = 500
if (not self.ascent): self.ascent = (yMax*scale)
if (not self.descent): self.descent = (yMin*scale)
self.capHeight = self.ascent
self.stemV = 50 + int(pow((usWeightClass / 65.0),2))
#################/
# post - PostScript table
#################/
self.seek_table("post")
self.skip(4)
self.italicAngle = self.read_short() + self.read_ushort() / 65536.0
self.underlinePosition = self.read_short() * scale
self.underlineThickness = self.read_short() * scale
isFixedPitch = self.read_ulong()
self.flags = 4
if (self.italicAngle!= 0):
self.flags = self.flags | 64
if (usWeightClass >= 600):
self.flags = self.flags | 262144
if (isFixedPitch):
self.flags = self.flags | 1
#################/
# hhea - Horizontal header table
#################/
self.seek_table("hhea")
self.skip(32)
metricDataFormat = self.read_ushort()
if (metricDataFormat != 0):
die('Unknown horizontal metric data format ' + str(metricDataFormat))
numberOfHMetrics = self.read_ushort()
if (numberOfHMetrics == 0):
die('Number of horizontal metrics is 0')
#################/
# maxp - Maximum profile table
#################/
self.seek_table("maxp")
self.skip(4)
numGlyphs = self.read_ushort()
#################/
# cmap - Character to glyph index mapping table
#################/
cmap_offset = self.seek_table("cmap")
self.skip(2)
cmapTableCount = self.read_ushort()
unicode_cmap_offset = 0
unicode_cmap_offset12 = 0
for i in range(cmapTableCount):
platformID = self.read_ushort()
encodingID = self.read_ushort()
offset = self.read_ulong()
save_pos = self._pos
if platformID == 3 and encodingID == 10: # Microsoft, UCS-4
format = self.get_ushort(cmap_offset + offset)
if (format == 12):
if not unicode_cmap_offset12:
unicode_cmap_offset12 = cmap_offset + offset
break
if ((platformID == 3 and encodingID == 1) or platformID == 0): # Microsoft, Unicode
format = self.get_ushort(cmap_offset + offset)
if (format == 4):
if (not unicode_cmap_offset):
unicode_cmap_offset = cmap_offset + offset
break
self.seek(save_pos)
if not unicode_cmap_offset and not unicode_cmap_offset12:
die('Font (' + self.filename + ') does not have cmap for Unicode (platform 3, encoding 1, format 4, or platform 3, encoding 10, format 12, or platform 0, any encoding, format 4)')
glyphToChar = {}
charToGlyph = {}
if unicode_cmap_offset12:
self.getCMAP12(unicode_cmap_offset12, glyphToChar, charToGlyph)
else:
self.getCMAP4(unicode_cmap_offset, glyphToChar, charToGlyph)
#################/
# hmtx - Horizontal metrics table
#################/
self.getHMTX(numberOfHMetrics, numGlyphs, glyphToChar, scale)
############################################/
############################################/
def makeSubset(self, file, subset):
self.filename = file
self.fh = open(file ,'rb')
self._pos = 0
self.charWidths = []
self.glyphPos = {}
self.charToGlyph = {}
self.tables = {}
self.otables = {}
self.ascent = 0
self.descent = 0
self.skip(4)
self.maxUni = 0
self.readTableDirectory()
#################/
# head - Font header table
#################/
self.seek_table("head")
self.skip(50)
indexToLocFormat = self.read_ushort()
glyphDataFormat = self.read_ushort()
#################/
# hhea - Horizontal header table
#################/
self.seek_table("hhea")
self.skip(32)
metricDataFormat = self.read_ushort()
orignHmetrics = numberOfHMetrics = self.read_ushort()
#################/
# maxp - Maximum profile table
#################/
self.seek_table("maxp")
self.skip(4)
numGlyphs = self.read_ushort()
#################/
# cmap - Character to glyph index mapping table
#################/
cmap_offset = self.seek_table("cmap")
self.skip(2)
cmapTableCount = self.read_ushort()
unicode_cmap_offset = 0
unicode_cmap_offset12 = 0
for i in range(cmapTableCount):
platformID = self.read_ushort()
encodingID = self.read_ushort()
offset = self.read_ulong()
save_pos = self._pos
if platformID == 3 and encodingID == 10: # Microsoft, UCS-4
format = self.get_ushort(cmap_offset + offset)
if (format == 12):
if not unicode_cmap_offset12:
unicode_cmap_offset12 = cmap_offset + offset
break
if ((platformID == 3 and encodingID == 1) or platformID == 0): # Microsoft, Unicode
format = self.get_ushort(cmap_offset + offset)
if (format == 4):
unicode_cmap_offset = cmap_offset + offset
break
            self.seek(save_pos)
if not unicode_cmap_offset and not unicode_cmap_offset12:
die('Font (' + self.filename + ') does not have cmap for Unicode (platform 3, encoding 1, format 4, or platform 3, encoding 10, format 12, or platform 0, any encoding, format 4)')
glyphToChar = {}
charToGlyph = {}
if unicode_cmap_offset12:
self.getCMAP12(unicode_cmap_offset12, glyphToChar, charToGlyph)
else:
self.getCMAP4(unicode_cmap_offset, glyphToChar, charToGlyph)
self.charToGlyph = charToGlyph
#################/
# hmtx - Horizontal metrics table
#################/
scale = 1 # not used
self.getHMTX(numberOfHMetrics, numGlyphs, glyphToChar, scale)
#################/
# loca - Index to location
#################/
self.getLOCA(indexToLocFormat, numGlyphs)
subsetglyphs = [(0, 0)] # special "sorted dict"!
subsetCharToGlyph = {}
for code in subset:
if (code in self.charToGlyph):
if (self.charToGlyph[code], code) not in subsetglyphs:
subsetglyphs.append((self.charToGlyph[code], code)) # Old Glyph ID => Unicode
subsetCharToGlyph[code] = self.charToGlyph[code] # Unicode to old GlyphID
self.maxUni = max(self.maxUni, code)
(start,dummy) = self.get_table_pos('glyf')
subsetglyphs.sort()
glyphSet = {}
n = 0
        fsLastCharIndex = 0 # maximum Unicode index (character code) in this font, according to the cmap subtable for platform ID 3 and platform-specific encoding ID 0 or 1.
for originalGlyphIdx, uni in subsetglyphs:
fsLastCharIndex = max(fsLastCharIndex , uni)
glyphSet[originalGlyphIdx] = n # old glyphID to new glyphID
n += 1
codeToGlyph = {}
for uni, originalGlyphIdx in sorted(subsetCharToGlyph.items()):
codeToGlyph[uni] = glyphSet[originalGlyphIdx]
self.codeToGlyph = codeToGlyph
for originalGlyphIdx, uni in subsetglyphs:
nonlocals = {'start': start, 'glyphSet': glyphSet,
'subsetglyphs': subsetglyphs}
self.getGlyphs(originalGlyphIdx, nonlocals)
numGlyphs = numberOfHMetrics = len(subsetglyphs)
#tables copied from the original
tags = ['name']
for tag in tags:
self.add(tag, self.get_table(tag))
tags = ['cvt ', 'fpgm', 'prep', 'gasp']
for tag in tags:
if (tag in self.tables):
self.add(tag, self.get_table(tag))
# post - PostScript
opost = self.get_table('post')
post = "\x00\x03\x00\x00" + substr(opost,4,12) + "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
self.add('post', post)
# Sort CID2GID map into segments of contiguous codes
if 0 in codeToGlyph:
del codeToGlyph[0]
#unset(codeToGlyph[65535])
rangeid = 0
range_ = {}
prevcid = -2
prevglidx = -1
# for each character
for cid, glidx in sorted(codeToGlyph.items()):
if (cid == (prevcid + 1) and glidx == (prevglidx + 1)):
range_[rangeid].append(glidx)
else:
# new range
rangeid = cid
range_[rangeid] = []
range_[rangeid].append(glidx)
prevcid = cid
prevglidx = glidx
        # cmap - Character to glyph mapping - Format 4 (MS / Unicode)
segCount = len(range_) + 1 # + 1 Last segment has missing character 0xFFFF
searchRange = 1
entrySelector = 0
while (searchRange * 2 <= segCount ):
searchRange = searchRange * 2
entrySelector = entrySelector + 1
searchRange = searchRange * 2
rangeShift = segCount * 2 - searchRange
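        # searchRange/entrySelector/rangeShift are the binary-search fields the
        # cmap format 4 header requires (TrueType spec); e.g. segCount == 5
        # yields searchRange == 8, entrySelector == 2, rangeShift == 2*5 - 8 == 2.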
length = 16 + (8*segCount ) + (numGlyphs+1)
cmap = [0, 1, # Index : version, number of encoding subtables
3, 1, # Encoding Subtable : platform (MS=3), encoding (Unicode)
0, 12, # Encoding Subtable : offset (hi,lo)
4, length, 0, # Format 4 Mapping subtable: format, length, language
segCount*2,
searchRange,
entrySelector,
rangeShift]
range_ = sorted(range_.items())
# endCode(s)
for start, subrange in range_:
endCode = start + (len(subrange)-1)
cmap.append(endCode) # endCode(s)
cmap.append(0xFFFF) # endCode of last Segment
cmap.append(0) # reservedPad
# startCode(s)
for start, subrange in range_:
cmap.append(start) # startCode(s)
cmap.append(0xFFFF) # startCode of last Segment
# idDelta(s)
for start, subrange in range_:
idDelta = -(start-subrange[0])
n += count(subrange)
cmap.append(idDelta) # idDelta(s)
cmap.append(1) # idDelta of last Segment
# idRangeOffset(s)
for subrange in range_:
cmap.append(0) # idRangeOffset[segCount] Offset in bytes to glyph indexArray, or 0
cmap.append(0) # idRangeOffset of last Segment
for subrange, glidx in range_:
cmap.extend(glidx)
cmap.append(0) # Mapping for last character
cmapstr = ''
for cm in cmap:
if cm >= 0:
cmapstr += pack(">H", cm)
else:
try:
cmapstr += pack(">h", cm)
except:
warnings.warn("cmap value too big/small: %s" % cm)
cmapstr += pack(">H", -cm)
self.add('cmap', cmapstr)
# glyf - Glyph data
(glyfOffset,glyfLength) = self.get_table_pos('glyf')
if (glyfLength < self.maxStrLenRead):
glyphData = self.get_table('glyf')
offsets = []
glyf = ''
pos = 0
hmtxstr = ''
xMinT = 0
yMinT = 0
xMaxT = 0
yMaxT = 0
advanceWidthMax = 0
minLeftSideBearing = 0
minRightSideBearing = 0
xMaxExtent = 0
maxPoints = 0 # points in non-compound glyph
maxContours = 0 # contours in non-compound glyph
maxComponentPoints = 0 # points in compound glyph
maxComponentContours = 0 # contours in compound glyph
maxComponentElements = 0 # number of glyphs referenced at top level
maxComponentDepth = 0 # levels of recursion, set to 0 if font has only simple glyphs
self.glyphdata = {}
for originalGlyphIdx, uni in subsetglyphs:
# hmtx - Horizontal Metrics
hm = self.getHMetric(orignHmetrics, originalGlyphIdx)
hmtxstr += hm
offsets.append(pos)
try:
glyphPos = self.glyphPos[originalGlyphIdx]
glyphLen = self.glyphPos[originalGlyphIdx + 1] - glyphPos
except IndexError:
warnings.warn("missing glyph %s" % (originalGlyphIdx))
glyphLen = 0
if (glyfLength < self.maxStrLenRead):
data = substr(glyphData,glyphPos,glyphLen)
else:
if (glyphLen > 0):
data = self.get_chunk(glyfOffset+glyphPos,glyphLen)
else:
data = ''
if (glyphLen > 0):
up = unpack(">H", substr(data,0,2))[0]
                up = unpack(">H", substr(data,0,2))[0] if False else up  # (see line above)
pos_in_glyph = 10
flags = GF_MORE
nComponentElements = 0
while (flags & GF_MORE):
nComponentElements += 1 # number of glyphs referenced at top level
up = unpack(">H", substr(data,pos_in_glyph,2))
flags = up[0]
up = unpack(">H", substr(data,pos_in_glyph+2,2))
glyphIdx = up[0]
self.glyphdata.setdefault(originalGlyphIdx, {}).setdefault('compGlyphs', []).append(glyphIdx)
try:
data = self._set_ushort(data, pos_in_glyph + 2, glyphSet[glyphIdx])
except KeyError:
data = 0
warnings.warn("missing glyph data %s" % glyphIdx)
pos_in_glyph += 4
if (flags & GF_WORDS):
pos_in_glyph += 4
else:
pos_in_glyph += 2
if (flags & GF_SCALE):
pos_in_glyph += 2
elif (flags & GF_XYSCALE):
pos_in_glyph += 4
elif (flags & GF_TWOBYTWO):
pos_in_glyph += 8
maxComponentElements = max(maxComponentElements, nComponentElements)
glyf += data
pos += glyphLen
if (pos % 4 != 0):
padding = 4 - (pos % 4)
glyf += str_repeat("\0",padding)
pos += padding
offsets.append(pos)
self.add('glyf', glyf)
# hmtx - Horizontal Metrics
self.add('hmtx', hmtxstr)
# loca - Index to location
locastr = ''
if (((pos + 1) >> 1) > 0xFFFF):
indexToLocFormat = 1 # long format
for offset in offsets:
locastr += pack(">L",offset)
else:
indexToLocFormat = 0 # short format
for offset in offsets:
locastr += pack(">H",(offset/2))
self.add('loca', locastr)
# head - Font header
head = self.get_table('head')
head = self._set_ushort(head, 50, indexToLocFormat)
self.add('head', head)
# hhea - Horizontal Header
hhea = self.get_table('hhea')
hhea = self._set_ushort(hhea, 34, numberOfHMetrics)
self.add('hhea', hhea)
# maxp - Maximum Profile
maxp = self.get_table('maxp')
maxp = self._set_ushort(maxp, 4, numGlyphs)
self.add('maxp', maxp)
# OS/2 - OS/2
os2 = self.get_table('OS/2')
self.add('OS/2', os2 )
self.fh.close()
# Put the TTF file together
stm = self.endTTFile('')
return stm
#########################################
# Recursively get composite glyph data
def getGlyphData(self, originalGlyphIdx, nonlocals):
# &maxdepth, &depth, &points, &contours
nonlocals['depth'] += 1
nonlocals['maxdepth'] = max(nonlocals['maxdepth'], nonlocals['depth'])
if (len(self.glyphdata[originalGlyphIdx]['compGlyphs'])):
for glyphIdx in self.glyphdata[originalGlyphIdx]['compGlyphs']:
self.getGlyphData(glyphIdx, nonlocals)
elif ((self.glyphdata[originalGlyphIdx]['nContours'] > 0) and nonlocals['depth'] > 0): # simple
            nonlocals['contours'] += self.glyphdata[originalGlyphIdx]['nContours']
            nonlocals['points'] += self.glyphdata[originalGlyphIdx]['nPoints']
nonlocals['depth'] -= 1
#########################################
# Recursively get composite glyphs
def getGlyphs(self, originalGlyphIdx, nonlocals):
# &start, &glyphSet, &subsetglyphs)
try:
glyphPos = self.glyphPos[originalGlyphIdx]
glyphLen = self.glyphPos[originalGlyphIdx + 1] - glyphPos
except IndexError:
warnings.warn("missing glyph %s" % (originalGlyphIdx))
return
if (not glyphLen):
return
self.seek(nonlocals['start'] + glyphPos)
numberOfContours = self.read_short()
if (numberOfContours < 0):
self.skip(8)
flags = GF_MORE
while (flags & GF_MORE):
flags = self.read_ushort()
glyphIdx = self.read_ushort()
if (glyphIdx not in nonlocals['glyphSet']):
nonlocals['glyphSet'][glyphIdx] = len(nonlocals['subsetglyphs']) # old glyphID to new glyphID
nonlocals['subsetglyphs'].append((glyphIdx, 1))
savepos = self.fh.tell()
self.getGlyphs(glyphIdx, nonlocals)
self.seek(savepos)
if (flags & GF_WORDS):
self.skip(4)
else:
self.skip(2)
if (flags & GF_SCALE):
self.skip(2)
elif (flags & GF_XYSCALE):
self.skip(4)
elif (flags & GF_TWOBYTWO):
self.skip(8)
#########################################
def getHMTX(self, numberOfHMetrics, numGlyphs, glyphToChar, scale):
start = self.seek_table("hmtx")
aw = 0
self.charWidths = [0] * 256*256*2
nCharWidths = 0
if ((numberOfHMetrics*4) < self.maxStrLenRead):
data = self.get_chunk(start,(numberOfHMetrics*4))
arr = unpack(">" + "H" * (len(data)/2), data)
else:
self.seek(start)
for glyph in range(numberOfHMetrics):
if ((numberOfHMetrics*4) < self.maxStrLenRead):
aw = arr[(glyph*2)] # PHP starts arrays from index 0!? +1
else:
aw = self.read_ushort()
lsb = self.read_ushort()
if (glyph in glyphToChar or glyph == 0):
if (aw >= (1 << 15) ):
aw = 0 # 1.03 Some (arabic) fonts have -ve values for width
# although should be unsigned value - comes out as e.g. 65108 (intended -50)
if (glyph == 0):
self.defaultWidth = scale*aw
continue
for char in glyphToChar[glyph]:
if (char != 0 and char != 65535):
w = int(round(scale*aw))
if (w == 0): w = 65535
if (char < 196608):
self.charWidths[char] = w
nCharWidths += 1
data = self.get_chunk((start+numberOfHMetrics*4),(numGlyphs*2))
arr = unpack(">" + "H" * (len(data)/2), data)
diff = numGlyphs-numberOfHMetrics
for pos in range(diff):
glyph = pos + numberOfHMetrics
if (glyph in glyphToChar):
for char in glyphToChar[glyph]:
if (char != 0 and char != 65535):
w = int(round(scale*aw))
if (w == 0): w = 65535
if (char < 196608):
self.charWidths[char] = w
nCharWidths += 1
# NB 65535 is a set width of 0
# First bytes define number of chars in font
self.charWidths[0] = nCharWidths
def getHMetric(self, numberOfHMetrics, gid):
start = self.seek_table("hmtx")
if (gid < numberOfHMetrics):
self.seek(start+(gid*4))
hm = self.fh.read(4)
else:
self.seek(start+((numberOfHMetrics-1)*4))
hm = self.fh.read(2)
self.seek(start+(numberOfHMetrics*2)+(gid*2))
hm += self.fh.read(2)
return hm
def getLOCA(self, indexToLocFormat, numGlyphs):
start = self.seek_table('loca')
self.glyphPos = []
if (indexToLocFormat == 0):
data = self.get_chunk(start,(numGlyphs*2)+2)
arr = unpack(">" + "H" * (len(data)/2), data)
for n in range(numGlyphs):
self.glyphPos.append((arr[n] * 2)) # n+1 !?
elif (indexToLocFormat == 1):
data = self.get_chunk(start,(numGlyphs*4)+4)
arr = unpack(">" + "L" * (len(data)/4), data)
for n in range(numGlyphs):
self.glyphPos.append((arr[n])) # n+1 !?
else:
            die('Unknown location table format ' + str(indexToLocFormat))
# CMAP Format 4
def getCMAP4(self, unicode_cmap_offset, glyphToChar, charToGlyph):
self.maxUniChar = 0
self.seek(unicode_cmap_offset + 2)
length = self.read_ushort()
limit = unicode_cmap_offset + length
self.skip(2)
segCount = self.read_ushort() / 2
self.skip(6)
endCount = []
for i in range(segCount):
endCount.append(self.read_ushort())
self.skip(2)
startCount = []
for i in range(segCount):
startCount.append(self.read_ushort())
idDelta = []
for i in range(segCount):
idDelta.append(self.read_short()) # ???? was unsigned short
idRangeOffset_start = self._pos
idRangeOffset = []
for i in range(segCount):
idRangeOffset.append(self.read_ushort())
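        # Format 4 lookup, as implemented below: idRangeOffset[n] == 0 maps a
        # char directly via (char + idDelta[n]) & 0xFFFF; otherwise the glyph id
        # is read from the glyphIdArray word located idRangeOffset[n] +
        # 2*(char - startCount[n]) bytes past &idRangeOffset[n], and idDelta[n]
        # is applied only when that id is non-zero.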
for n in range(segCount):
endpoint = (endCount[n] + 1)
for unichar in range(startCount[n], endpoint, 1):
if (idRangeOffset[n] == 0):
glyph = (unichar + idDelta[n]) & 0xFFFF
else:
offset = (unichar - startCount[n]) * 2 + idRangeOffset[n]
offset = idRangeOffset_start + 2 * n + offset
if (offset >= limit):
glyph = 0
else:
glyph = self.get_ushort(offset)
if (glyph != 0):
glyph = (glyph + idDelta[n]) & 0xFFFF
charToGlyph[unichar] = glyph
if (unichar < 196608):
self.maxUniChar = max(unichar,self.maxUniChar)
glyphToChar.setdefault(glyph, []).append(unichar)
# CMAP Format 12
def getCMAP12(self, unicode_cmap_offset, glyphToChar, charToGlyph):
self.maxUniChar = 0
# table (skip format version, should be 12)
self.seek(unicode_cmap_offset + 2)
# reserved
self.skip(2)
# table length
length = self.read_ulong()
# language (should be 0)
self.skip(4)
# groups count
grpCount = self.read_ulong()
if 2 + 2 + 4 + 4 + 4 + grpCount * 3 * 4 > length:
die("TTF format 12 cmap table too small")
for n in range(grpCount):
startCharCode = self.read_ulong()
endCharCode = self.read_ulong()
glyph = self.read_ulong()
for unichar in range(startCharCode, endCharCode + 1):
charToGlyph[unichar] = glyph
if (unichar < 196608):
self.maxUniChar = max(unichar, self.maxUniChar)
glyphToChar.setdefault(glyph, []).append(unichar)
glyph += 1
# Put the TTF file together
def endTTFile(self, stm):
stm = ''
numTables = count(self.otables)
searchRange = 1
entrySelector = 0
while (searchRange * 2 <= numTables):
searchRange = searchRange * 2
entrySelector = entrySelector + 1
searchRange = searchRange * 16
rangeShift = numTables * 16 - searchRange
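        # Each sfnt table-directory entry is 16 bytes (4-byte tag, checksum,
        # offset and length), hence the factor 16 here; e.g. numTables == 10
        # gives searchRange == 8*16 == 128, entrySelector == 3 and
        # rangeShift == 10*16 - 128 == 32.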
# Header
if (_TTF_MAC_HEADER):
stm += (pack(">LHHHH", 0x74727565, numTables, searchRange, entrySelector, rangeShift)) # Mac
else:
stm += (pack(">LHHHH", 0x00010000 , numTables, searchRange, entrySelector, rangeShift)) # Windows
# Table directory
tables = self.otables
offset = 12 + numTables * 16
sorted_tables = sorted(tables.items())
for tag, data in sorted_tables:
if (tag == 'head'):
head_start = offset
stm += tag
checksum = calcChecksum(data)
stm += pack(">HH", checksum[0],checksum[1])
stm += pack(">LL", offset, strlen(data))
paddedLength = (strlen(data)+3)&~3
offset = offset + paddedLength
# Table data
for tag, data in sorted_tables:
data += "\0\0\0"
stm += substr(data,0,(strlen(data)&~3))
checksum = calcChecksum(stm)
checksum = sub32((0xB1B0,0xAFBA), checksum)
chk = pack(">HH", checksum[0],checksum[1])
stm = self.splice(stm,(head_start + 8),chk)
return stm
if __name__ == '__main__':
ttf = TTFontFile()
    ttffile = 'DejaVuSansCondensed.ttf'
ttf.getMetrics(ttffile)
# test basic metrics:
assert round(ttf.descent, 0) == -236
assert round(ttf.capHeight, 0) == 928
assert ttf.flags == 4
assert [round(i, 0) for i in ttf.bbox] == [-918, -415, 1513, 1167]
assert ttf.italicAngle == 0
assert ttf.stemV == 87
assert round(ttf.defaultWidth, 0) == 540
assert round(ttf.underlinePosition, 0) == -63
assert round(ttf.underlineThickness, 0) == 44
    # test char widths (against binary file generated by tfpdf.php):
assert ''.join(ttf.charWidths) == open("dejavusanscondensed.cw.dat").read()
| Python |
# -*- coding: iso-8859-1 -*-
"PDF Template Helper for FPDF.py"
__author__ = "Mariano Reingart <reingart@gmail.com>"
__copyright__ = "Copyright (C) 2010 Mariano Reingart"
__license__ = "LGPL 3.0"
import sys,os,csv
from fpdf import FPDF
def rgb(col):
    return (col // 65536), (col // 256 % 256), (col % 256)
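# rgb() above decodes a packed 0xRRGGBB integer into an (r, g, b) tuple,
# e.g. rgb(0xFF8800) == (255, 136, 0).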
class Template:
def __init__(self, infile=None, elements=None, format='A4', orientation='portrait',
title='', author='', subject='', creator='', keywords=''):
if elements:
self.elements = elements
self.keys = [v['name'].lower() for v in self.elements]
self.handlers = {'T': self.text, 'L': self.line, 'I': self.image,
'B': self.rect, 'BC': self.barcode, }
self.pg_no = 0
self.texts = {}
pdf = self.pdf = FPDF(format=format,orientation=orientation, unit="mm")
pdf.set_title(title)
pdf.set_author(author)
pdf.set_creator(creator)
pdf.set_subject(subject)
pdf.set_keywords(keywords)
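    # Minimal usage sketch (hypothetical element values; the dict keys mirror
    # the 'keys' tuple used by parse_csv below):
    #   elems = [dict(name='greeting', type='T', x1=10, y1=10, x2=100, y2=18,
    #                 font='Arial', size=12, bold=0, italic=0, underline=0,
    #                 foreground=0, background=0xFFFFFF, align='L',
    #                 text='Hello', priority=0, multiline=None)]
    #   t = Template(elements=elems)
    #   t.add_page()
    #   t['greeting'] = 'Hello world!'
    #   t.render('./greeting.pdf')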
def parse_csv(self, infile, delimiter=",", decimal_sep="."):
"Parse template format csv file and create elements dict"
keys = ('name','type','x1','y1','x2','y2','font','size',
'bold','italic','underline','foreground','background',
'align','text','priority', 'multiline')
self.elements = []
for row in csv.reader(open(infile, 'rb'), delimiter=delimiter):
kargs = {}
for i,v in enumerate(row):
if not v.startswith("'") and decimal_sep!=".":
v = v.replace(decimal_sep,".")
else:
v = v
if v=='':
v = None
else:
v = eval(v.strip())
kargs[keys[i]] = v
self.elements.append(kargs)
self.keys = [v['name'].lower() for v in self.elements]
def add_page(self):
self.pg_no += 1
self.texts[self.pg_no] = {}
def __setitem__(self, name, value):
if self.has_key(name):
if isinstance(value,unicode):
value = value.encode("latin1","ignore")
elif value is None:
value = ""
else:
value = str(value)
self.texts[self.pg_no][name.lower()] = value
# setitem shortcut (may be further extended)
set = __setitem__
def has_key(self, name):
return name.lower() in self.keys
def __getitem__(self, name):
if self.has_key(name):
key = name.lower()
if key in self.texts:
# text for this page:
return self.texts[self.pg_no][key]
else:
# find first element for default text:
elements = [element for element in self.elements
if element['name'].lower() == key]
if elements:
return elements[0]['text']
def split_multicell(self, text, element_name):
"Divide (\n) a string using a given element width"
pdf = self.pdf
element = [element for element in self.elements
if element['name'].lower() == element_name.lower()][0]
style = ""
if element['bold']: style += "B"
if element['italic']: style += "I"
if element['underline']: style += "U"
pdf.set_font(element['font'],style,element['size'])
align = {'L':'L','R':'R','I':'L','D':'R','C':'C','':''}.get(element['align']) # D/I in spanish
if isinstance(text, unicode):
text = text.encode("latin1","ignore")
else:
text = str(text)
return pdf.multi_cell(w=element['x2']-element['x1'],
h=element['y2']-element['y1'],
txt=text,align=align,split_only=True)
def render(self, outfile, dest="F"):
pdf = self.pdf
for pg in range(1, self.pg_no+1):
pdf.add_page()
pdf.set_font('Arial','B',16)
pdf.set_auto_page_break(False,margin=0)
for element in sorted(self.elements,key=lambda x: x['priority']):
#print "dib",element['type'], element['name'], element['x1'], element['y1'], element['x2'], element['y2']
element = element.copy()
element['text'] = self.texts[pg].get(element['name'].lower(), element['text'])
if 'rotate' in element:
pdf.rotate(element['rotate'], element['x1'], element['y1'])
self.handlers[element['type'].upper()](pdf, **element)
if 'rotate' in element:
pdf.rotate(0)
return pdf.output(outfile, dest)
    def text(self, pdf, x1=0, y1=0, x2=0, y2=0, text='', font="arial", size=10,
             bold=False, italic=False, underline=False, align="",
             foreground=0, background=65535, multiline=None,
             *args, **kwargs):
if text:
if pdf.text_color!=rgb(foreground):
pdf.set_text_color(*rgb(foreground))
            if pdf.fill_color != rgb(background):
                pdf.set_fill_color(*rgb(background))
font = font.strip().lower()
if font == 'arial black':
font = 'arial'
style = ""
for tag in 'B', 'I', 'U':
if (text.startswith("<%s>" % tag) and text.endswith("</%s>" %tag)):
text = text[3:-4]
style += tag
if bold: style += "B"
if italic: style += "I"
if underline: style += "U"
align = {'L':'L','R':'R','I':'L','D':'R','C':'C','':''}.get(align) # D/I in spanish
pdf.set_font(font,style,size)
##m_k = 72 / 2.54
##h = (size/m_k)
pdf.set_xy(x1,y1)
if multiline is None:
# multiline==None: write without wrapping/trimming (default)
pdf.cell(w=x2-x1,h=y2-y1,txt=text,border=0,ln=0,align=align)
elif multiline:
            # multiline==True: automatic word-wrap
pdf.multi_cell(w=x2-x1,h=y2-y1,txt=text,border=0,align=align)
else:
# multiline==False: trim to fit exactly the space defined
text = pdf.multi_cell(w=x2-x1, h=y2-y1,
txt=text, align=align, split_only=True)[0]
print "trimming: *%s*" % text
pdf.cell(w=x2-x1,h=y2-y1,txt=text,border=0,ln=0,align=align)
#pdf.Text(x=x1,y=y1,txt=text)
def line(self, pdf, x1=0, y1=0, x2=0, y2=0, size=0, foreground=0, *args, **kwargs):
if pdf.draw_color!=rgb(foreground):
#print "SetDrawColor", hex(foreground)
pdf.set_draw_color(*rgb(foreground))
#print "SetLineWidth", size
pdf.set_line_width(size)
pdf.line(x1, y1, x2, y2)
    def rect(self, pdf, x1=0, y1=0, x2=0, y2=0, size=0, foreground=0, background=65535, *args, **kwargs):
if pdf.draw_color!=rgb(foreground):
pdf.set_draw_color(*rgb(foreground))
        if pdf.fill_color != rgb(background):
            pdf.set_fill_color(*rgb(background))
pdf.set_line_width(size)
pdf.rect(x1, y1, x2-x1, y2-y1)
def image(self, pdf, x1=0, y1=0, x2=0, y2=0, text='', *args,**kwargs):
pdf.image(text,x1,y1,w=x2-x1,h=y2-y1,type='',link='')
def barcode(self, pdf, x1=0, y1=0, x2=0, y2=0, text='', font="arial", size=1,
foreground=0, *args, **kwargs):
if pdf.draw_color!=rgb(foreground):
pdf.set_draw_color(*rgb(foreground))
font = font.lower().strip()
if font == 'interleaved 2of5 nt':
pdf.interleaved2of5(text,x1,y1,w=size,h=y2-y1)
if __name__ == "__main__":
    # generate sample invoice (according to Argentina's regulations)
import random
from decimal import Decimal
f = Template(format="A4",
title="Sample Invoice", author="Sample Company",
subject="Sample Customer", keywords="Electronic TAX Invoice")
f.parse_csv(infile="invoice.csv", delimiter=";", decimal_sep=",")
detail = "Lorem ipsum dolor sit amet, consectetur. " * 30
items = []
for i in range(1, 30):
ds = "Sample product %s" % i
qty = random.randint(1,10)
price = round(random.random()*100,3)
code = "%s%s%02d" % (chr(random.randint(65,90)), chr(random.randint(65,90)),i)
items.append(dict(code=code, unit='u',
qty=qty, price=price,
amount=qty*price,
ds="%s: %s" % (i,ds)))
# divide and count lines
lines = 0
li_items = []
for it in items:
qty = it['qty']
code = it['code']
unit = it['unit']
for ds in f.split_multicell(it['ds'], 'item_description01'):
# add item description line (without price nor amount)
li_items.append(dict(code=code, ds=ds, qty=qty, unit=unit, price=None, amount=None))
# clean qty and code (show only at first)
unit = qty = code = None
# set last item line price and amount
li_items[-1].update(amount = it['amount'],
price = it['price'])
obs="\n<U>Detail:</U>\n\n" + detail
for ds in f.split_multicell(obs, 'item_description01'):
li_items.append(dict(code=code, ds=ds, qty=qty, unit=unit, price=None, amount=None))
# calculate pages:
lines = len(li_items)
max_lines_per_page = 24
pages = lines / (max_lines_per_page - 1)
if lines % (max_lines_per_page - 1): pages = pages + 1
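    # e.g. 50 item lines at 23 usable lines per page: 50/23 == 2 with a
    # remainder of 4, so a third page is added for the overflow.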
    # fill in the fields, page by page
for page in range(1, pages+1):
f.add_page()
f['page'] = 'Page %s of %s' % (page, pages)
if pages>1 and page<pages:
s = 'Continues on page %s' % (page+1)
else:
s = ''
f['item_description%02d' % (max_lines_per_page+1)] = s
f["company_name"] = "Sample Company"
f["company_logo"] = "tutorial/logo.png"
f["company_header1"] = "Some Address - somewhere -"
f["company_header2"] = "http://www.example.com"
f["company_footer1"] = "Tax Code ..."
f["company_footer2"] = "Tax/VAT ID ..."
f['number'] = '0001-00001234'
f['issue_date'] = '2010-09-10'
f['due_date'] = '2099-09-10'
f['customer_name'] = "Sample Client"
f['customer_address'] = "Siempreviva 1234"
# print line item...
li = 0
k = 0
total = Decimal("0.00")
for it in li_items:
k = k + 1
if k > page * (max_lines_per_page - 1):
break
if it['amount']:
total += Decimal("%.6f" % it['amount'])
if k > (page - 1) * (max_lines_per_page - 1):
li += 1
if it['qty'] is not None:
f['item_quantity%02d' % li] = it['qty']
if it['code'] is not None:
f['item_code%02d' % li] = it['code']
if it['unit'] is not None:
f['item_unit%02d' % li] = it['unit']
f['item_description%02d' % li] = it['ds']
if it['price'] is not None:
f['item_price%02d' % li] = "%0.3f" % it['price']
if it['amount'] is not None:
f['item_amount%02d' % li] = "%0.2f" % it['amount']
if pages == page:
f['net'] = "%0.2f" % (total/Decimal("1.21"))
f['vat'] = "%0.2f" % (total*(1-1/Decimal("1.21")))
f['total_label'] = 'Total:'
else:
f['total_label'] = 'SubTotal:'
f['total'] = "%0.2f" % total
f.render("./invoice.pdf")
if sys.platform.startswith("linux"):
os.system("evince ./invoice.pdf")
else:
os.system("./invoice.pdf")
| Python |
#!/usr/bin/env python
# -*- coding: latin-1 -*-
# fpdf php helpers:
def substr(s, start, length=-1):
if length < 0:
length=len(s)-start
return s[start:start+length]
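# e.g. substr("abcdef", 2, 3) == "cde" and substr("abcdef", 2) == "cdef"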
def sprintf(fmt, *args): return fmt % args
def print_r(array):
if not isinstance(array, dict):
array = dict([(k, k) for k in array])
for k, v in array.items():
print "[%s] => %s" % (k, v),
def UTF8ToUTF16BE(instr, setbom=True):
"Converts UTF-8 strings to UTF16-BE."
outstr = ""
if (setbom):
outstr += "\xFE\xFF";
if not isinstance(instr, unicode):
instr = instr.decode('UTF-8')
outstr += instr.encode('UTF-16BE')
return outstr
def UTF8StringToArray(instr):
"Converts UTF-8 strings to codepoints array"
return [ord(c) for c in instr]
# ttfints php helpers:
def die(msg):
raise RuntimeError(msg)
def str_repeat(s, count):
return s * count
def str_pad(s, pad_length=0, pad_char= " ", pad_type= +1 ):
if pad_type<0: # pad left
return s.rjust(pad_length, pad_char)
elif pad_type>0: # pad right
return s.ljust(pad_length, pad_char)
else: # pad both
return s.center(pad_length, pad_char)
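# e.g. str_pad("7", 3, "0", -1) == "007" (left pad, like PHP's STR_PAD_LEFT);
# str_pad("ab", 4) == "ab  " (right pad is the default)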
strlen = count = lambda s: len(s) | Python |
# -*- coding: latin-1 -*-
"HTML Renderer for FPDF.py"
__author__ = "Mariano Reingart <reingart@gmail.com>"
__copyright__ = "Copyright (C) 2010 Mariano Reingart"
__license__ = "LGPL 3.0"
# Inspired by tuto5.py and several examples from fpdf.org, html2fpdf, etc.
from fpdf import FPDF
from HTMLParser import HTMLParser
DEBUG = False
def px2mm(px):
return int(px)*25.4/72.0
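# px2mm() above assumes pixels at 72 dpi (px * 25.4 / 72), so px2mm(72) == 25.4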
def hex2dec(color = "#000000"):
if color:
r = int(color[1:3], 16)
g = int(color[3:5], 16)
b = int(color[5:7], 16)
return r, g, b
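# e.g. hex2dec("#ff8800") == (255, 136, 0); a falsy color yields None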
class HTML2FPDF(HTMLParser):
"Render basic HTML to FPDF"
def __init__(self, pdf, image_map=None):
HTMLParser.__init__(self)
self.style = {}
self.pre = False
self.href = ''
self.align = ''
self.page_links = {}
self.font_list = ("times","courier", "helvetica")
self.font = None
self.font_stack = []
self.pdf = pdf
self.image_map = image_map or (lambda src: src)
self.r = self.g = self.b = 0
self.indent = 0
self.bullet = []
self.set_font("times", 12)
self.font_face = "times" # initialize font
self.color = 0 #initialize font color
self.table = None # table attributes
self.table_col_width = None # column (header) widths
self.table_col_index = None # current column index
self.td = None # cell attributes
self.th = False # header enabled
self.tr = None
self.theader = None # table header cells
self.tfooter = None # table footer cells
self.thead = None
self.tfoot = None
self.theader_out = self.tfooter_out = False
def width2mm(self, length):
if length[-1]=='%':
total = self.pdf.w - self.pdf.r_margin - self.pdf.l_margin
if self.table['width'][-1]=='%':
total *= int(self.table['width'][:-1])/100.0
return int(length[:-1]) * total / 101.0
else:
return int(length) / 6.0
def handle_data(self, txt):
if self.td is not None: # drawing a table?
if 'width' not in self.td and 'colspan' not in self.td:
try:
l = [self.table_col_width[self.table_col_index]]
except IndexError:
raise RuntimeError("Table column/cell width not specified, unable to continue")
elif 'colspan' in self.td:
i = self.table_col_index
colspan = int(self.td['colspan'])
l = self.table_col_width[i:i+colspan]
else:
l = [self.td.get('width','240')]
            w = sum([self.width2mm(length) for length in l])
h = int(self.td.get('height', 0)) / 4 or self.h*1.30
self.table_h = h
border = int(self.table.get('border', 0))
if not self.th:
align = self.td.get('align', 'L')[0].upper()
border = border and 'LR'
else:
self.set_style('B',True)
border = border or 'B'
align = 'C'
bgcolor = hex2dec(self.td.get('bgcolor', self.tr.get('bgcolor', '')))
# parsing table header/footer (drawn later):
if self.thead is not None:
self.theader.append(((w,h,txt,border,0,align), bgcolor))
if self.tfoot is not None:
self.tfooter.append(((w,h,txt,border,0,align), bgcolor))
# check if reached end of page, add table footer and header:
height = h + (self.tfooter and self.tfooter[0][0][1] or 0)
if self.pdf.y+height>self.pdf.page_break_trigger and not self.th:
self.output_table_footer()
self.pdf.add_page()
self.theader_out = self.tfooter_out = False
if self.tfoot is None and self.thead is None:
if not self.theader_out:
self.output_table_header()
self.box_shadow(w, h, bgcolor)
if DEBUG: print "td cell", self.pdf.x, w, txt, "*"
self.pdf.cell(w,h,txt,border,0,align)
elif self.table is not None:
# ignore anything else than td inside a table
pass
elif self.align:
if DEBUG: print "cell", txt, "*"
self.pdf.cell(0,self.h,txt,0,1,self.align[0].upper(), self.href)
else:
txt = txt.replace("\n"," ")
if self.href:
self.put_link(self.href,txt)
else:
if DEBUG: print "write", txt, "*"
self.pdf.write(self.h,txt)
def box_shadow(self, w, h, bgcolor):
if DEBUG: print "box_shadow", w, h, bgcolor
if bgcolor:
fill_color = self.pdf.fill_color
self.pdf.set_fill_color(*bgcolor)
self.pdf.rect(self.pdf.x, self.pdf.y, w, h, 'F')
self.pdf.fill_color = fill_color
def output_table_header(self):
if self.theader:
b = self.b
x = self.pdf.x
self.pdf.set_x(self.table_offset)
self.set_style('B',True)
for cell, bgcolor in self.theader:
self.box_shadow(cell[0], cell[1], bgcolor)
self.pdf.cell(*cell)
self.set_style('B',b)
self.pdf.ln(self.theader[0][0][1])
self.pdf.set_x(self.table_offset)
#self.pdf.set_x(x)
self.theader_out = True
def output_table_footer(self):
if self.tfooter:
x = self.pdf.x
self.pdf.set_x(self.table_offset)
#TODO: self.output_table_sep()
for cell, bgcolor in self.tfooter:
self.box_shadow(cell[0], cell[1], bgcolor)
self.pdf.cell(*cell)
self.pdf.ln(self.tfooter[0][0][1])
self.pdf.set_x(x)
if int(self.table.get('border', 0)):
self.output_table_sep()
self.tfooter_out = True
def output_table_sep(self):
self.pdf.set_x(self.table_offset)
x1 = self.pdf.x
y1 = self.pdf.y
        w = sum([self.width2mm(length) for length in self.table_col_width])
self.pdf.line(x1,y1,x1+w,y1)
def handle_starttag(self, tag, attrs):
attrs = dict(attrs)
if DEBUG: print "STARTTAG", tag, attrs
if tag=='b' or tag=='i' or tag=='u':
self.set_style(tag,1)
if tag=='a':
self.href=attrs['href']
if tag=='br':
self.pdf.ln(5)
if tag=='p':
self.pdf.ln(5)
            if attrs:
                self.align = attrs.get('align')
if tag in ('h1', 'h2', 'h3', 'h4', 'h5', 'h6'):
k = (2, 1.5, 1.17, 1, 0.83, 0.67)[int(tag[1])]
self.pdf.ln(5*k)
self.pdf.set_text_color(150,0,0)
self.pdf.set_font_size(12 * k)
if attrs: self.align = attrs.get('align')
if tag=='hr':
self.put_line()
if tag=='pre':
self.pdf.set_font('Courier','',11)
self.pdf.set_font_size(11)
self.set_style('B',False)
self.set_style('I',False)
self.pre = True
if tag=='blockquote':
self.set_text_color(100,0,45)
self.pdf.ln(3)
if tag=='ul':
self.indent+=1
self.bullet.append('\x95')
if tag=='ol':
self.indent+=1
self.bullet.append(0)
if tag=='li':
self.pdf.ln(self.h+2)
self.pdf.set_text_color(190,0,0)
bullet = self.bullet[self.indent-1]
if not isinstance(bullet, basestring):
bullet += 1
self.bullet[self.indent-1] = bullet
bullet = "%s. " % bullet
self.pdf.write(self.h,'%s%s ' % (' '*5*self.indent, bullet))
self.set_text_color()
if tag=='font':
# save previous font state:
self.font_stack.append((self.font_face, self.font_size, self.color))
            if 'color' in attrs:
                color = hex2dec(attrs['color'])
                self.set_text_color(*color)
                self.color = color
if 'face' in attrs and attrs['face'].lower() in self.font_list:
face = attrs.get('face').lower()
self.pdf.set_font(face)
self.font_face = face
if 'size' in attrs:
size = int(attrs.get('size'))
self.pdf.set_font(self.font_face, size=int(size))
self.font_size = size
if tag=='table':
self.table = dict([(k.lower(), v) for k,v in attrs.items()])
if not 'width' in self.table:
self.table['width'] = '100%'
if self.table['width'][-1]=='%':
w = self.pdf.w - self.pdf.r_margin - self.pdf.l_margin
w *= int(self.table['width'][:-1])/100.0
self.table_offset = (self.pdf.w-w)/2.0
self.table_col_width = []
self.theader_out = self.tfooter_out = False
self.theader = []
self.tfooter = []
self.thead = None
self.tfoot = None
self.table_h = 0
self.pdf.ln()
if tag=='tr':
self.tr = dict([(k.lower(), v) for k,v in attrs.items()])
self.table_col_index = 0
self.pdf.set_x(self.table_offset)
if tag=='td':
self.td = dict([(k.lower(), v) for k,v in attrs.items()])
if tag=='th':
self.td = dict([(k.lower(), v) for k,v in attrs.items()])
self.th = True
if 'width' in self.td:
self.table_col_width.append(self.td['width'])
if tag=='thead':
self.thead = {}
if tag=='tfoot':
self.tfoot = {}
if tag=='img':
if 'src' in attrs:
x = self.pdf.get_x()
y = self.pdf.get_y()
w = px2mm(attrs.get('width', 0))
h = px2mm(attrs.get('height',0))
if self.align and self.align[0].upper() == 'C':
x = (self.pdf.w-x)/2.0 - w/2.0
self.pdf.image(self.image_map(attrs['src']),
x, y, w, h, link=self.href)
self.pdf.set_x(x+w)
self.pdf.set_y(y+h)
if tag=='b' or tag=='i' or tag=='u':
self.set_style(tag, True)
if tag=='center':
self.align = 'Center'
def handle_endtag(self, tag):
#Closing tag
if DEBUG: print "ENDTAG", tag
if tag=='h1' or tag=='h2' or tag=='h3' or tag=='h4':
self.pdf.ln(6)
self.set_font()
self.set_style()
self.align = None
if tag=='pre':
self.pdf.set_font(self.font or 'Times','',12)
self.pdf.set_font_size(12)
self.pre=False
if tag=='blockquote':
self.set_text_color(0,0,0)
self.pdf.ln(3)
if tag=='strong':
tag='b'
if tag=='em':
tag='i'
if tag=='b' or tag=='i' or tag=='u':
self.set_style(tag, False)
if tag=='a':
self.href=''
if tag=='p':
self.align=''
if tag in ('ul', 'ol'):
self.indent-=1
self.bullet.pop()
if tag=='table':
if not self.tfooter_out:
self.output_table_footer()
self.table = None
self.th = False
self.theader = None
self.tfooter = None
self.pdf.ln()
if tag=='thead':
self.thead = None
if tag=='tfoot':
self.tfoot = None
if tag=='tbody':
# draw a line separator between table bodies
self.pdf.set_x(self.table_offset)
self.output_table_sep()
if tag=='tr':
h = self.table_h
if self.tfoot is None:
self.pdf.ln(h)
self.tr = None
if tag=='td' or tag=='th':
if self.th:
if DEBUG: print "revert style"
self.set_style('B', False) # revert style
self.table_col_index += int(self.td.get('colspan','1'))
self.td = None
self.th = False
if tag=='font':
# recover last font state
face, size, color = self.font_stack.pop()
if face:
self.pdf.set_text_color(0,0,0)
self.color = None
self.set_font(face, size)
self.font = None
if tag=='center':
self.align = None
def set_font(self, face=None, size=None):
if face:
self.font_face = face
if size:
self.font_size = size
self.h = size / 72.0*25.4
if DEBUG: print "H", self.h
self.pdf.set_font(self.font_face or 'times','',12)
self.pdf.set_font_size(self.font_size or 12)
self.set_style('u', False)
self.set_style('b', False)
self.set_style('i', False)
self.set_text_color()
def set_style(self, tag=None, enable=None):
#Modify style and select corresponding font
if tag:
t = self.style.get(tag.lower())
self.style[tag.lower()] = enable
style=''
for s in ('b','i','u'):
if self.style.get(s):
style+=s
if DEBUG: print "SET_FONT_STYLE", style
self.pdf.set_font('',style)
def set_text_color(self, r=None, g=0, b=0):
if r is None:
self.pdf.set_text_color(self.r,self.g,self.b)
else:
self.pdf.set_text_color(r, g, b)
self.r = r
self.g = g
self.b = b
def put_link(self, url, txt):
#Put a hyperlink
self.set_text_color(0,0,255)
self.set_style('u', True)
self.pdf.write(5,txt,url)
self.set_style('u', False)
self.set_text_color(0)
def put_line(self):
self.pdf.ln(2)
self.pdf.line(self.pdf.get_x(),self.pdf.get_y(),self.pdf.get_x()+187,self.pdf.get_y())
self.pdf.ln(3)
class HTMLMixin(object):
def write_html(self, text, image_map=None):
"Parse HTML and convert it to PDF"
h2p = HTML2FPDF(self, image_map)
h2p.feed(text)
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"FPDF for python"
__license__ = "LGPL 3.0"
__version__ = "1.7"
from fpdf import *
try:
from html import HTMLMixin
except ImportError:
import warnings
warnings.warn("web2py gluon package not installed, required for html2pdf")
from template import Template
| Python |
#!/usr/bin/env python
# coding:utf-8
"Queues(Pipe)-based independent remote client-server Python Debugger"
__author__ = "Mariano Reingart (reingart@gmail.com)"
__copyright__ = "Copyright (C) 2011 Mariano Reingart"
__license__ = "LGPL 3.0"
__version__ = "1.01b"
# remote debugger queue-based (jsonrpc-like interface):
# - bidirectional communication (request - response calls in both ways)
# - request with id == null is a notification (do not send a response)
# - request with a value for id is a normal call, wait response
# based on IDLE, inspired by the PythonWin implementation; much code taken from pdb
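# Illustrative wire format (values are examples only): a call looks like
#   {'method': 'do_step', 'args': (), 'kwargs': {}, 'id': 1}
# and is answered with
#   {'version': '1.1', 'id': 1, 'result': None, 'error': None}
# while a notification such as {'method': 'write', 'args': ('hi',), 'id': None}
# expects no response (see pull_actions and write below).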
import bdb
import inspect
import linecache
import os
import sys
import traceback
import cmd
import pydoc
import threading
class Qdb(bdb.Bdb):
"Qdb Debugger Backend"
def __init__(self, pipe, redirect_stdio=True, allow_interruptions=False,
skip=[__name__]):
kwargs = {}
if sys.version_info > (2, 7):
kwargs['skip'] = skip
bdb.Bdb.__init__(self, **kwargs)
self.frame = None
self.i = 1 # sequential RPC call id
self.waiting = False
self.pipe = pipe # for communication
self._wait_for_mainpyfile = False
self._wait_for_breakpoint = False
self.mainpyfile = ""
        self._lineno = None  # last listed line number
# replace system standard input and output (send them thru the pipe)
if redirect_stdio:
sys.stdin = self
sys.stdout = self
sys.stderr = self
if allow_interruptions:
# fake breakpoint to prevent removing trace_dispatch on set_continue
self.breaks[None] = []
self.allow_interruptions = allow_interruptions
self.burst = 0 # do not send notifications ("burst" mode)
self.params = {} # optional parameters for interaction
def pull_actions(self):
# receive a remote procedure call from the frontend:
# returns True if action processed
# None when 'run' notification is received (see 'startup')
request = self.pipe.recv()
if request.get("method") == 'run':
return None
response = {'version': '1.1', 'id': request.get('id'),
'result': None,
'error': None}
try:
# dispatch message (JSON RPC like)
method = getattr(self, request['method'])
response['result'] = method.__call__(*request['args'],
**request.get('kwargs', {}))
except Exception, e:
response['error'] = {'code': 0, 'message': str(e)}
# send the result for normal method calls, not for notifications
if request.get('id'):
self.pipe.send(response)
return True
# Override Bdb methods
def trace_dispatch(self, frame, event, arg):
# check for non-interaction rpc (set_breakpoint, interrupt)
while self.allow_interruptions and self.pipe.poll():
self.pull_actions()
# process the frame (see Bdb.trace_dispatch)
if self.quitting:
return # None
if event == 'line':
return self.dispatch_line(frame)
if event == 'call':
return self.dispatch_call(frame, arg)
if event == 'return':
return self.dispatch_return(frame, arg)
if event == 'exception':
return self.dispatch_exception(frame, arg)
return self.trace_dispatch
def user_call(self, frame, argument_list):
"""This method is called when there is the remote possibility
that we ever need to stop in this function."""
if self._wait_for_mainpyfile or self._wait_for_breakpoint:
return
if self.stop_here(frame):
self.interaction(frame, None)
def user_line(self, frame):
"""This function is called when we stop or break at this line."""
if self._wait_for_mainpyfile:
if (not self.canonic(frame.f_code.co_filename).startswith(self.mainpyfile)
or frame.f_lineno <= 0):
return
self._wait_for_mainpyfile = 0
if self._wait_for_breakpoint:
if not self.break_here(frame):
return
self._wait_for_breakpoint = 0
self.interaction(frame)
def user_exception(self, frame, info):
"""This function is called if an exception occurs,
but only if we are to stop at or just below this level."""
if self._wait_for_mainpyfile or self._wait_for_breakpoint:
return
extype, exvalue, trace = info
        # pre-process the stack trace as it isn't pickleable (cannot be sent as-is)
msg = ''.join(traceback.format_exception(extype, exvalue, trace))
trace = traceback.extract_tb(trace)
title = traceback.format_exception_only(extype, exvalue)[0]
# send an Exception notification
msg = {'method': 'exception',
'args': (title, extype.__name__, exvalue, trace, msg),
'id': None}
self.pipe.send(msg)
self.interaction(frame, info)
def run(self, code, interp=None, *args, **kwargs):
try:
return bdb.Bdb.run(self, code, *args, **kwargs)
finally:
pass
def runcall(self, function, interp=None, *args, **kwargs):
try:
self.interp = interp
return bdb.Bdb.runcall(self, function, *args, **kwargs)
finally:
pass
def _runscript(self, filename):
# The script has to run in __main__ namespace (clear it)
import __main__
import imp
__main__.__dict__.clear()
__main__.__dict__.update({"__name__": "__main__",
"__file__": filename,
"__builtins__": __builtins__,
"imp": imp, # need for run
})
# avoid stopping before we reach the main script
self._wait_for_mainpyfile = 1
self.mainpyfile = self.canonic(filename)
self._user_requested_quit = 0
statement = 'imp.load_source("__main__", "%s")' % filename
        # notify the frontend and wait for it to set initial params and breakpoints
self.pipe.send({'method': 'startup', 'args': (__version__, )})
while self.pull_actions() is not None:
pass
self.run(statement)
# General interaction function
def interaction(self, frame, info=None):
        # cache frame locals to ensure that modifications are not overwritten
self.frame_locals = frame and frame.f_locals or {}
# extract current filename and line number
code, lineno = frame.f_code, frame.f_lineno
filename = code.co_filename
basename = os.path.basename(filename)
message = "%s:%s" % (basename, lineno)
if code.co_name != "?":
message = "%s: %s()" % (message, code.co_name)
# wait user events
self.waiting = True
self.frame = frame
try:
while self.waiting:
# sync_source_line()
if frame and filename[:1] + filename[-1:] != "<>" and os.path.exists(filename):
line = linecache.getline(filename, self.frame.f_lineno,
self.frame.f_globals)
else:
line = ""
# send the notification (debug event) - DOESN'T WAIT RESPONSE
self.burst -= 1
if self.burst < 0:
kwargs = {}
if self.params.get('call_stack'):
kwargs['call_stack'] = self.do_where()
if self.params.get('environment'):
kwargs['environment'] = self.do_environment()
self.pipe.send({'method': 'interaction', 'id': None,
'args': (filename, self.frame.f_lineno, line),
'kwargs': kwargs})
self.pull_actions()
finally:
self.waiting = False
self.frame = None
def do_debug(self, mainpyfile=None, wait_breakpoint=1):
self.reset()
if not wait_breakpoint or mainpyfile:
self._wait_for_mainpyfile = 1
if not mainpyfile:
frame = sys._getframe().f_back
mainpyfile = frame.f_code.co_filename
self.mainpyfile = self.canonic(mainpyfile)
self._wait_for_breakpoint = wait_breakpoint
sys.settrace(self.trace_dispatch)
def set_trace(self, frame=None):
        # start debugger interaction immediately
if frame is None:
frame = sys._getframe().f_back
self._wait_for_mainpyfile = frame.f_code.co_filename
self._wait_for_breakpoint = 0
bdb.Bdb.set_trace(self, frame)
# Command definitions, called by interaction()
def do_continue(self):
self.set_continue()
self.waiting = False
def do_step(self):
self.set_step()
self.waiting = False
def do_return(self):
self.set_return(self.frame)
self.waiting = False
def do_next(self):
self.set_next(self.frame)
self.waiting = False
def interrupt(self):
self.set_step()
def do_quit(self):
self.set_quit()
self.waiting = False
def do_jump(self, lineno):
arg = int(lineno)
try:
self.frame.f_lineno = arg
return arg
except ValueError, e:
print '*** Jump failed:', e
return False
def do_list(self, arg):
last = None
if arg:
if isinstance(arg, tuple):
first, last = arg
else:
first = arg
elif not self._lineno:
first = max(1, self.frame.f_lineno - 5)
else:
first = self._lineno + 1
if last is None:
last = first + 10
filename = self.frame.f_code.co_filename
breaklist = self.get_file_breaks(filename)
lines = []
for lineno in range(first, last + 1):
line = linecache.getline(filename, lineno,
self.frame.f_globals)
if not line:
                lines.append((filename, lineno, '', '', "<EOF>\n"))
break
else:
breakpoint = "B" if lineno in breaklist else ""
current = "->" if self.frame.f_lineno == lineno else ""
lines.append((filename, lineno, breakpoint, current, line))
self._lineno = lineno
return lines
def do_read(self, filename):
return open(filename, "Ur").read()
def do_set_breakpoint(self, filename, lineno, temporary=0, cond=None):
return self.set_break(filename, int(lineno), temporary, cond)
def do_list_breakpoint(self):
breaks = []
if self.breaks: # There's at least one
for bp in bdb.Breakpoint.bpbynumber:
if bp:
breaks.append((bp.number, bp.file, bp.line,
bp.temporary, bp.enabled, bp.hits, bp.cond, ))
return breaks
def do_clear_breakpoint(self, filename, lineno):
self.clear_break(filename, lineno)
def do_clear_file_breakpoints(self, filename):
self.clear_all_file_breaks(filename)
def do_clear(self, arg):
# required by BDB to remove temp breakpoints!
err = self.clear_bpbynumber(arg)
if err:
print '*** DO_CLEAR failed', err
def do_eval(self, arg, safe=True):
ret = eval(arg, self.frame.f_globals,
self.frame_locals)
if safe:
ret = pydoc.cram(repr(ret), 255)
return ret
def do_exec(self, arg):
locals = self.frame_locals
globals = self.frame.f_globals
code = compile(arg + '\n', '<stdin>', 'single')
save_displayhook = sys.displayhook
self.displayhook_value = None
try:
sys.displayhook = self.displayhook
exec code in globals, locals
finally:
sys.displayhook = save_displayhook
return self.displayhook_value
def do_where(self):
"print_stack_trace"
stack, curindex = self.get_stack(self.frame, None)
lines = []
for frame, lineno in stack:
filename = frame.f_code.co_filename
line = linecache.getline(filename, lineno)
lines.append((filename, lineno, "", "", line, ))
return lines
def do_environment(self):
"return current frame local and global environment"
env = {'locals': {}, 'globals': {}}
# converts the frame global and locals to a short text representation:
if self.frame:
for name, value in self.frame_locals.items():
env['locals'][name] = pydoc.cram(repr(
value), 255), repr(type(value))
for name, value in self.frame.f_globals.items():
env['globals'][name] = pydoc.cram(repr(
value), 20), repr(type(value))
return env
def get_autocomplete_list(self, expression):
"Return list of auto-completion options for expression"
try:
obj = self.do_eval(expression)
except:
return []
else:
return dir(obj)
def get_call_tip(self, expression):
"Return list of auto-completion options for expression"
try:
obj = self.do_eval(expression)
except Exception, e:
return ('', '', str(e))
else:
name = ''
try:
name = obj.__name__
except AttributeError:
pass
argspec = ''
drop_self = 0
f = None
try:
if inspect.isbuiltin(obj):
pass
elif inspect.ismethod(obj):
# Get the function from the object
f = obj.im_func
drop_self = 1
elif inspect.isclass(obj):
# Get the __init__ method function for the class.
if hasattr(obj, '__init__'):
f = obj.__init__.im_func
else:
                        for base in obj.__bases__:
if hasattr(base, '__init__'):
f = base.__init__.im_func
break
if f is not None:
drop_self = 1
elif callable(obj):
# use the obj as a function by default
f = obj
# Get the __call__ method instead.
f = obj.__call__.im_func
drop_self = 0
except AttributeError:
pass
if f:
argspec = apply(inspect.formatargspec, inspect.getargspec(f))
doc = ''
if callable(obj):
try:
                    doc = inspect.getdoc(obj) or ''
except:
pass
return (name, argspec[1:-1], doc.strip())
def set_burst(self, val):
"Set burst mode -multiple command count- (shut up notifications)"
self.burst = val
def set_params(self, params):
"Set parameters for interaction"
self.params.update(params)
def displayhook(self, obj):
"""Custom displayhook for the do_exec which prevents
assignment of the _ variable in the builtins.
"""
self.displayhook_value = repr(obj)
def reset(self):
bdb.Bdb.reset(self)
self.waiting = False
self.frame = None
def post_mortem(self, t=None):
# handling the default
if t is None:
# sys.exc_info() returns (type, value, traceback) if an exception is
# being handled, otherwise it returns None
t = sys.exc_info()[2]
if t is None:
raise ValueError("A valid traceback must be passed if no "
"exception is being handled")
self.reset()
# get last frame:
while t is not None:
frame = t.tb_frame
t = t.tb_next
code, lineno = frame.f_code, frame.f_lineno
filename = code.co_filename
line = linecache.getline(filename, lineno)
#(filename, lineno, "", current, line, )}
self.interaction(frame)
# console file-like object emulation
def readline(self):
"Replacement for stdin.readline()"
msg = {'method': 'readline', 'args': (), 'id': self.i}
self.pipe.send(msg)
msg = self.pipe.recv()
self.i += 1
return msg['result']
def readlines(self):
"Replacement for stdin.readlines()"
lines = []
while lines[-1:] != ['\n']:
lines.append(self.readline())
return lines
def write(self, text):
"Replacement for stdout.write()"
msg = {'method': 'write', 'args': (text, ), 'id': None}
self.pipe.send(msg)
def writelines(self, l):
map(self.write, l)
def flush(self):
pass
def isatty(self):
return 0
class QueuePipe(object):
"Simulated pipe for threads (using two queues)"
def __init__(self, name, in_queue, out_queue):
self.__name = name
self.in_queue = in_queue
self.out_queue = out_queue
def send(self, data):
self.out_queue.put(data, block=True)
def recv(self, count=None, timeout=None):
data = self.in_queue.get(block=True, timeout=timeout)
return data
def poll(self, timeout=None):
return not self.in_queue.empty()
def close(self):
pass
class RPCError(RuntimeError):
"Remote Error (not user exception)"
pass
class Frontend(object):
"Qdb generic Frontend interface"
def __init__(self, pipe):
self.i = 1
self.pipe = pipe
self.notifies = []
self.read_lock = threading.RLock()
self.write_lock = threading.RLock()
def recv(self):
self.read_lock.acquire()
try:
return self.pipe.recv()
finally:
self.read_lock.release()
def send(self, data):
self.write_lock.acquire()
try:
return self.pipe.send(data)
finally:
self.write_lock.release()
def startup(self):
self.send({'method': 'run', 'args': (), 'id': None})
    def interaction(self, filename, lineno, line, **kwargs):
raise NotImplementedError
def exception(self, title, extype, exvalue, trace, request):
"Show a user_exception"
raise NotImplementedError
def write(self, text):
"Console output (print)"
raise NotImplementedError
def readline(self, text):
"Console input/rawinput"
raise NotImplementedError
def run(self):
"Main method dispatcher (infinite loop)"
if self.pipe:
if not self.notifies:
# wait for a message...
request = self.recv()
else:
                # process an asynchronous notification received earlier
request = self.notifies.pop(0)
return self.process_message(request)
def process_message(self, request):
if request:
result = None
if request.get("error"):
# it is not supposed to get an error here
# it should be raised by the method call
                raise RPCError(request['error']['message'])
elif request.get('method') == 'interaction':
self.interaction(*request.get("args"), **request.get("kwargs"))
elif request.get('method') == 'startup':
self.startup()
elif request.get('method') == 'exception':
self.exception(*request['args'])
elif request.get('method') == 'write':
self.write(*request.get("args"))
elif request.get('method') == 'readline':
result = self.readline()
if result:
response = {'version': '1.1', 'id': request.get('id'),
'result': result,
'error': None}
self.send(response)
return True
def call(self, method, *args):
"Actually call the remote method (inside the thread)"
req = {'method': method, 'args': args, 'id': self.i}
self.send(req)
self.i += 1 # increment the id
while 1:
            # wait until the command is acknowledged (response id matches the request)
res = self.recv()
if 'id' not in res or not res['id']:
# nested notification received (i.e. write)! process it!
self.process_message(res)
elif 'result' not in res:
# nested request received (i.e. readline)! process it!
self.process_message(res)
elif long(req['id']) != long(res['id']):
print "DEBUGGER wrong packet received: expecting id", req[
'id'], res['id']
# protocol state is unknown
elif 'error' in res and res['error']:
raise RPCError(res['error']['message'])
else:
return res['result']
def do_step(self, arg=None):
"Execute the current line, stop at the first possible occasion"
self.call('do_step')
def do_next(self, arg=None):
"Execute the current line, do not stop at function calls"
self.call('do_next')
def do_continue(self, arg=None):
"Continue execution, only stop when a breakpoint is encountered."
self.call('do_continue')
def do_return(self, arg=None):
"Continue execution until the current function returns"
self.call('do_return')
def do_jump(self, arg):
"Set the next line that will be executed."
res = self.call('do_jump', arg)
print res
def do_where(self, arg=None):
"Print a stack trace, with the most recent frame at the bottom."
return self.call('do_where')
def do_quit(self, arg=None):
"Quit from the debugger. The program being executed is aborted."
self.call('do_quit')
def do_eval(self, expr):
"Inspect the value of the expression"
return self.call('do_eval', expr)
def do_environment(self):
"List all the locals and globals variables (string representation)"
return self.call('do_environment')
def do_list(self, arg=None):
"List source code for the current file"
return self.call('do_list', arg)
def do_read(self, filename):
"Read and send a local filename"
return self.call('do_read', filename)
def do_set_breakpoint(self, filename, lineno, temporary=0, cond=None):
"Set a breakpoint at filename:breakpoint"
self.call('do_set_breakpoint', filename, lineno, temporary, cond)
def do_clear_breakpoint(self, filename, lineno):
"Remove a breakpoint at filename:breakpoint"
self.call('do_clear_breakpoint', filename, lineno)
def do_clear_file_breakpoints(self, filename):
"Remove all breakpoints at filename"
        self.call('do_clear_file_breakpoints', filename)
def do_list_breakpoint(self):
"List all breakpoints"
return self.call('do_list_breakpoint')
def do_exec(self, statement):
return self.call('do_exec', statement)
def get_autocomplete_list(self, expression):
return self.call('get_autocomplete_list', expression)
def get_call_tip(self, expression):
return self.call('get_call_tip', expression)
def interrupt(self):
"Immediately stop at the first possible occasion (outside interaction)"
        # this is a notification, do not expect a response
req = {'method': 'interrupt', 'args': ()}
self.send(req)
def set_burst(self, value):
req = {'method': 'set_burst', 'args': (value, )}
self.send(req)
def set_params(self, params):
req = {'method': 'set_params', 'args': (params, )}
self.send(req)
class Cli(Frontend, cmd.Cmd):
"Qdb Front-end command line interface"
def __init__(self, pipe, completekey='tab', stdin=None, stdout=None, skip=None):
cmd.Cmd.__init__(self, completekey, stdin, stdout)
Frontend.__init__(self, pipe)
# redefine Frontend methods:
def run(self):
while 1:
try:
Frontend.run(self)
except KeyboardInterrupt:
print "Interupting..."
self.interrupt()
def interaction(self, filename, lineno, line):
print "> %s(%d)\n-> %s" % (filename, lineno, line),
self.filename = filename
self.cmdloop()
def exception(self, title, extype, exvalue, trace, request):
print "=" * 80
print "Exception", title
print request
print "-" * 80
def write(self, text):
print text,
def readline(self):
return raw_input()
def postcmd(self, stop, line):
return not line.startswith("h") # stop
do_h = cmd.Cmd.do_help
do_s = Frontend.do_step
do_n = Frontend.do_next
do_c = Frontend.do_continue
do_r = Frontend.do_return
do_j = Frontend.do_jump
do_q = Frontend.do_quit
def do_eval(self, args):
"Inspect the value of the expression"
print Frontend.do_eval(self, args)
def do_list(self, args):
"List source code for the current file"
lines = Frontend.do_list(self, eval(args, {}, {}) if args else None)
self.print_lines(lines)
def do_where(self, args):
"Print a stack trace, with the most recent frame at the bottom."
lines = Frontend.do_where(self)
self.print_lines(lines)
def do_environment(self, args=None):
env = Frontend.do_environment(self)
for key in env:
print "=" * 78
print key.capitalize()
print "-" * 78
for name, value in env[key].items():
print "%-12s = %s" % (name, value)
def do_list_breakpoint(self, arg=None):
"List all breakpoints"
breaks = Frontend.do_list_breakpoint(self)
print "Num File Line Temp Enab Hits Cond"
for bp in breaks:
print '%-4d%-30s%4d %4s %4s %4d %s' % bp
print
def do_set_breakpoint(self, arg):
"Set a breakpoint at filename:breakpoint"
if arg:
if ':' in arg:
args = arg.split(":")
else:
args = (self.filename, arg)
Frontend.do_set_breakpoint(self, *args)
else:
self.do_list_breakpoint()
do_b = do_set_breakpoint
do_l = do_list
do_p = do_eval
do_w = do_where
do_e = do_environment
def default(self, line):
"Default command"
if line[:1] == '!':
print self.do_exec(line[1:])
else:
print "*** Unknown command: ", line
def print_lines(self, lines):
for filename, lineno, bp, current, source in lines:
print "%s:%4d%s%s\t%s" % (filename, lineno, bp, current, source),
print
def test():
def f(pipe):
print "creating debugger"
qdb = Qdb(pipe=pipe, redirect_stdio=False)
print "set trace"
my_var = "Mariano!"
qdb.set_trace()
print "hello world!"
print "good by!"
saraza
if '--process' in sys.argv:
from multiprocessing import Process, Pipe
pipe, child_conn = Pipe()
p = Process(target=f, args=(child_conn,))
else:
from threading import Thread
from Queue import Queue
parent_queue, child_queue = Queue(), Queue()
front_conn = QueuePipe("parent", parent_queue, child_queue)
child_conn = QueuePipe("child", child_queue, parent_queue)
p = Thread(target=f, args=(child_conn,))
p.start()
import time
class Test(Frontend):
def interaction(self, *args):
print "interaction!", args
def exception(self, *args):
print "exception", args
#raise RuntimeError("exception %s" % repr(args))
qdb = Test(front_conn)
time.sleep(5)
while 1:
print "running..."
Frontend.run(qdb)
time.sleep(1)
print "do_next"
qdb.do_next()
p.join()
def connect(host="localhost", port=6000, authkey='secret password'):
"Connect to a running debugger backend"
address = (host, port)
from multiprocessing.connection import Client
print "qdb debugger fronted: waiting for connection to", address
conn = Client(address, authkey=authkey)
try:
Cli(conn).run()
except EOFError:
pass
finally:
conn.close()
def main(host='localhost', port=6000, authkey='secret password'):
"Debug a script and accept a remote frontend"
if not sys.argv[1:] or sys.argv[1] in ("--help", "-h"):
print "usage: pdb.py scriptfile [arg] ..."
sys.exit(2)
mainpyfile = sys.argv[1] # Get script filename
if not os.path.exists(mainpyfile):
print 'Error:', mainpyfile, 'does not exist'
sys.exit(1)
    del sys.argv[0]  # Hide "qdb.py" from argument list
    # Replace qdb's dir with script's dir in front of module search path.
sys.path[0] = os.path.dirname(mainpyfile)
from multiprocessing.connection import Listener
address = (host, port) # family is deduced to be 'AF_INET'
listener = Listener(address, authkey=authkey)
print "qdb debugger backend: waiting for connection at", address
conn = listener.accept()
print 'qdb debugger backend: connected to', listener.last_accepted
# create the backend
qdb = Qdb(conn, redirect_stdio=True, allow_interruptions=True)
try:
print "running", mainpyfile
qdb._runscript(mainpyfile)
print "The program finished"
except SystemExit:
# In most cases SystemExit does not warrant a post-mortem session.
print "The program exited via sys.exit(). Exit status: ",
print sys.exc_info()[1]
raise
except:
raise
conn.close()
listener.close()
qdb = None
qdb = listener = conn = None  # module-level state shared by set_trace() and quit()
def set_trace(host='localhost', port=6000, authkey='secret password'):
"Simplified interface to debug running programs"
global qdb, listener, conn
from multiprocessing.connection import Listener
# only create it if not currently instantiated
if not qdb:
address = (host, port) # family is deduced to be 'AF_INET'
listener = Listener(address, authkey=authkey)
conn = listener.accept()
# create the backend
qdb = Qdb(conn)
# start debugger backend:
qdb.set_trace()
def quit():
"Remove trace and quit"
global qdb, listener, conn
if qdb:
sys.settrace(None)
qdb = None
if conn:
conn.close()
conn = None
if listener:
listener.close()
listener = None
if __name__ == '__main__':
# When invoked as main program:
if '--test' in sys.argv:
test()
# Check environment for configuration parameters:
kwargs = {}
for param in 'host', 'port', 'authkey':
if 'QDB_%s' % param.upper() in os.environ:
kwargs[param] = os.environ['QDB_%s' % param.upper()]
if not sys.argv[1:]:
        # connect to a remote debugger
connect(**kwargs)
else:
# start the debugger on a script
# reimport as global __main__ namespace is destroyed
import qdb
qdb.main(**kwargs)
| Python |
"""
Developed by Massimo Di Pierro
Released under the web2py license (LGPL)
What does it do?
If html is a variable containing HTML text with URLs in it, calling
html = expand_html(html)
automatically converts each URL to a link and, when possible, embeds the object being linked.
In particular it can embed images, videos, audio files, documents (via the Google Docs viewer),
as well as pages published through an oembed provider.
Google Doc Support
==================
Microsoft Word (.DOC, .DOCX)
Microsoft Excel (.XLS and .XLSX)
Microsoft PowerPoint 2007 / 2010 (.PPTX)
Apple Pages (.PAGES)
Adobe PDF (.PDF)
Adobe Illustrator (.AI)
Adobe Photoshop (.PSD)
Autodesk AutoCad (.DXF)
Scalable Vector Graphics (.SVG)
PostScript (.EPS, .PS)
TrueType (.TTF)
XML Paper Specification (.XPS)
Oembed Support
==============
flickr.com
youtube.com
hulu.com
vimeo.com
slideshare.net
qik.com
polleverywhere.com
wordpress.com
revision3.com
viddler.com
"""
import re
import cgi
import sys
from simplejson import loads
import urllib
import uuid
try:
from BeautifulSoup import BeautifulSoup, Comment
have_soup = True
except ImportError:
have_soup = False
regex_link = re.compile('https?://\S+')
EMBED_MAPS = [
(re.compile('http://\S*?flickr.com/\S*'),
'http://www.flickr.com/services/oembed/'),
(re.compile('http://\S*.youtu(\.be|be\.com)/watch\S*'),
'http://www.youtube.com/oembed'),
(re.compile('http://www.hulu.com/watch/\S*'),
'http://www.hulu.com/api/oembed.json'),
(re.compile('http://vimeo.com/\S*'),
'http://vimeo.com/api/oembed.json'),
(re.compile('http://www.slideshare.net/[^\/]+/\S*'),
'http://www.slideshare.net/api/oembed/2'),
(re.compile('http://qik.com/\S*'),
'http://qik.com/api/oembed.json'),
(re.compile('http://www.polleverywhere.com/\w+/\S+'),
'http://www.polleverywhere.com/services/oembed/'),
(re.compile('http://\S+.wordpress.com/\S+'),
'http://public-api.wordpress.com/oembed/'),
    (re.compile('http://\S*revision3.com/\S+'),
'http://revision3.com/api/oembed/'),
(re.compile('http://\S+.viddler.com/\S+'),
'http://lab.viddler.com/services/oembed/'),
]
def image(url):
return '<img src="%s" style="max-width:100%%"/>' % url
def audio(url):
return '<audio controls="controls" style="max-width:100%%"><source src="%s" /></audio>' % url
def video(url):
return '<video controls="controls" style="max-width:100%%"><source src="%s" /></video>' % url
def googledoc_viewer(url):
return '<iframe src="http://docs.google.com/viewer?url=%s&embedded=true" style="max-width:100%%"></iframe>' % urllib.quote(url)
def web2py_component(url):
code = str(uuid.uuid4())
return '<div id="%s"></div><script>\nweb2py_component("%s","%s");\n</script>' % (code, url, code)
EXTENSION_MAPS = {
'png': image,
'gif': image,
'jpg': image,
'jpeg': image,
'wav': audio,
'ogg': audio,
'mp3': audio,
'mov': video,
'mpe': video,
'mp4': video,
'mpg': video,
'mpg2': video,
'mpeg': video,
'mpeg4': video,
'movie': video,
'load': web2py_component,
'pdf': googledoc_viewer,
'doc': googledoc_viewer,
'docx': googledoc_viewer,
'ppt': googledoc_viewer,
'pptx': googledoc_viewer,
'xls': googledoc_viewer,
'xlsx': googledoc_viewer,
'pages': googledoc_viewer,
'ai': googledoc_viewer,
'psd': googledoc_viewer,
    'dxf': googledoc_viewer,
'svg': googledoc_viewer,
'ttf': googledoc_viewer,
'xps': googledoc_viewer,
}
class VimeoURLOpener(urllib.FancyURLopener):
"Vimeo blocks the urllib user agent for some reason"
version = "Mozilla/4.0"
urllib._urlopener = VimeoURLOpener()
def oembed(url):
for k, v in EMBED_MAPS:
if k.match(url):
            oembed = v + '?format=json&url=' + urllib.quote(url, '')  # url-encode so the target's own query string survives
try:
data = urllib.urlopen(oembed).read()
return loads(data) # json!
except:
pass
return {}
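# For example, a YouTube watch URL would be resolved roughly through
#   http://www.youtube.com/oembed?format=json&url=http%3A%2F%2Fwww.youtube.com%2Fwatch%3Fv%3D...
# (illustrative only; the returned JSON carries the embeddable 'html')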
def extension(url):
return url.split('?')[0].split('.')[-1].lower()
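# e.g. extension('http://host/doc.PDF?dl=1') -> 'pdf'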
def expand_one(url, cdict):
    # try oembed, but first check the cache
if cdict and url in cdict:
r = cdict[url]
else:
r = oembed(url)
if isinstance(cdict, dict):
cdict[url] = r
# if oembed service
if 'html' in r:
html = r['html'].encode('utf8')
if html.startswith('<object'):
return '<embed style="max-width:100%%">%s</embed>' % html
else:
return html
elif 'url' in r:
url = r['url'].encode('utf8')
# embed images, video, audio files
ext = extension(url)
if ext in EXTENSION_MAPS:
return EXTENSION_MAPS[ext](url)
# else regular link
return '<a href="%(u)s">%(u)s</a>' % dict(u=url)
def expand_html(html, cdict=None):
if not have_soup:
raise RuntimeError("Missing BeautifulSoup")
soup = BeautifulSoup(html)
comments = soup.findAll(text=lambda text: isinstance(text, Comment))
    for comment in comments:
        comment.extract()
for txt in soup.findAll(text=True):
        if txt.parent.name not in ('a', 'script', 'pre', 'code', 'embed', 'object', 'audio', 'video'):
ntxt = regex_link.sub(
lambda match: expand_one(match.group(0), cdict), txt)
txt.replaceWith(BeautifulSoup(ntxt))
return str(soup)
def test():
example = """
<h3>Fringilla nisi parturient nullam</h3>
<p>http://www.youtube.com/watch?v=IWBFiI5RrA0</p>
<p>http://www.web2py.com/examples/static/images/logo_bw.png</p>
<p>http://www.web2py.com/examples/default/index.load</p>
<p>http://www.web2py.com/examples/static/web2py_manual_cutl.pdf</p>
<p>Elementum sodales est varius magna leo sociis erat. Nascetur pretium non
ultricies gravida. Condimentum at nascetur tempus. Porttitor viverra ipsum
accumsan neque aliquet. Ultrices vestibulum tempor quisque eget sem eget.
Ornare malesuada tempus dolor dolor magna consectetur. Nisl dui non curabitur
laoreet tortor.</p>
"""
return expand_html(example)
if __name__ == "__main__":
if len(sys.argv) > 1:
print expand_html(open(sys.argv[1]).read())
else:
print test()
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Developed by Robin Bhattacharyya (memecache for GAE)
Released under the web2py license (LGPL)
from gluon.contrib.gae_memcache import MemcacheClient
cache.ram=cache.disk=MemcacheClient(request)
"""
import time
from google.appengine.api.memcache import Client
class MemcacheClient(object):
client = Client()
def __init__(self, request):
self.request = request
def __call__(
self,
key,
f,
time_expire=300,
):
key = '%s/%s' % (self.request.application, key)
dt = time_expire
value = None
obj = self.client.get(key)
if obj and (dt is None or obj[0] > time.time() - dt):
value = obj[1]
elif f is None:
if obj:
self.client.delete(key)
else:
value = f()
self.client.set(key, (time.time(), value))
return value
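    # Semantics sketch: keys are namespaced per application and stored as
    # (timestamp, value) pairs; a pair is reused while younger than
    # time_expire seconds, and f=None merely invalidates the key, e.g.
    # (hypothetical call): cache.ram('stats', lambda: compute(), 60)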
def increment(self, key, value=1):
key = '%s/%s' % (self.request.application, key)
obj = self.client.get(key)
if obj:
value = obj[1] + value
self.client.set(key, (time.time(), value))
return value
def clear(self, key=None):
if key:
key = '%s/%s' % (self.request.application, key)
self.client.delete(key)
else:
self.client.flush_all()
def delete(self, *a, **b):
return self.client.delete(*a, **b)
    def get(self, *a, **b):
        return self.client.get(*a, **b)
    def set(self, *a, **b):
        return self.client.set(*a, **b)
    def flush_all(self, *a, **b):
        return self.client.flush_all(*a, **b)
| Python |
"""Implementation of JSONDecoder
"""
import re
import sys
import struct
from scanner import make_scanner
def _import_c_scanstring():
try:
        raise ImportError  # force the pure-Python fallback; _speedups assumes simplejson is on sys.path
from simplejson._speedups import scanstring
return scanstring
except ImportError:
return None
c_scanstring = _import_c_scanstring()
__all__ = ['JSONDecoder']
FLAGS = re.VERBOSE | re.MULTILINE | re.DOTALL
def _floatconstants():
_BYTES = '7FF80000000000007FF0000000000000'.decode('hex')
# The struct module in Python 2.4 would get frexp() out of range here
# when an endian is specified in the format string. Fixed in Python 2.5+
if sys.byteorder != 'big':
_BYTES = _BYTES[:8][::-1] + _BYTES[8:][::-1]
nan, inf = struct.unpack('dd', _BYTES)
return nan, inf, -inf
NaN, PosInf, NegInf = _floatconstants()
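# The hex string above packs two big-endian IEEE-754 doubles:
# 0x7FF8000000000000 (a quiet NaN) and 0x7FF0000000000000 (+Infinity).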
class JSONDecodeError(ValueError):
"""Subclass of ValueError with the following additional properties:
msg: The unformatted error message
doc: The JSON document being parsed
pos: The start index of doc where parsing failed
end: The end index of doc where parsing failed (may be None)
lineno: The line corresponding to pos
colno: The column corresponding to pos
endlineno: The line corresponding to end (may be None)
endcolno: The column corresponding to end (may be None)
"""
def __init__(self, msg, doc, pos, end=None):
ValueError.__init__(self, errmsg(msg, doc, pos, end=end))
self.msg = msg
self.doc = doc
self.pos = pos
self.end = end
self.lineno, self.colno = linecol(doc, pos)
if end is not None:
self.endlineno, self.endcolno = linecol(doc, end)
else:
self.endlineno, self.endcolno = None, None
def linecol(doc, pos):
lineno = doc.count('\n', 0, pos) + 1
if lineno == 1:
colno = pos
else:
colno = pos - doc.rindex('\n', 0, pos)
return lineno, colno
def errmsg(msg, doc, pos, end=None):
# Note that this function is called from _speedups
lineno, colno = linecol(doc, pos)
if end is None:
#fmt = '{0}: line {1} column {2} (char {3})'
#return fmt.format(msg, lineno, colno, pos)
fmt = '%s: line %d column %d (char %d)'
return fmt % (msg, lineno, colno, pos)
endlineno, endcolno = linecol(doc, end)
#fmt = '{0}: line {1} column {2} - line {3} column {4} (char {5} - {6})'
#return fmt.format(msg, lineno, colno, endlineno, endcolno, pos, end)
fmt = '%s: line %d column %d - line %d column %d (char %d - %d)'
return fmt % (msg, lineno, colno, endlineno, endcolno, pos, end)
_CONSTANTS = {
'-Infinity': NegInf,
'Infinity': PosInf,
'NaN': NaN,
}
STRINGCHUNK = re.compile(r'(.*?)(["\\\x00-\x1f])', FLAGS)
BACKSLASH = {
'"': u'"', '\\': u'\\', '/': u'/',
'b': u'\b', 'f': u'\f', 'n': u'\n', 'r': u'\r', 't': u'\t',
}
DEFAULT_ENCODING = "utf-8"
def py_scanstring(s, end, encoding=None, strict=True,
_b=BACKSLASH, _m=STRINGCHUNK.match):
"""Scan the string s for a JSON string. End is the index of the
character in s after the quote that started the JSON string.
Unescapes all valid JSON string escape sequences and raises ValueError
on attempt to decode an invalid string. If strict is False then literal
control characters are allowed in the string.
Returns a tuple of the decoded string and the index of the character in s
after the end quote."""
if encoding is None:
encoding = DEFAULT_ENCODING
chunks = []
_append = chunks.append
begin = end - 1
while 1:
chunk = _m(s, end)
if chunk is None:
raise JSONDecodeError(
"Unterminated string starting at", s, begin)
end = chunk.end()
content, terminator = chunk.groups()
        # Content contains zero or more unescaped string characters
if content:
if not isinstance(content, unicode):
content = unicode(content, encoding)
_append(content)
# Terminator is the end of string, a literal control character,
# or a backslash denoting that an escape sequence follows
if terminator == '"':
break
elif terminator != '\\':
if strict:
msg = "Invalid control character %r at" % (terminator,)
#msg = "Invalid control character {0!r} at".format(terminator)
raise JSONDecodeError(msg, s, end)
else:
_append(terminator)
continue
try:
esc = s[end]
except IndexError:
raise JSONDecodeError(
"Unterminated string starting at", s, begin)
# If not a unicode escape sequence, must be in the lookup table
if esc != 'u':
try:
char = _b[esc]
except KeyError:
msg = "Invalid \\escape: " + repr(esc)
raise JSONDecodeError(msg, s, end)
end += 1
else:
# Unicode escape sequence
esc = s[end + 1:end + 5]
next_end = end + 5
if len(esc) != 4:
msg = "Invalid \\uXXXX escape"
raise JSONDecodeError(msg, s, end)
uni = int(esc, 16)
# Check for surrogate pair on UCS-4 systems
if 0xd800 <= uni <= 0xdbff and sys.maxunicode > 65535:
msg = "Invalid \\uXXXX\\uXXXX surrogate pair"
if not s[end + 5:end + 7] == '\\u':
raise JSONDecodeError(msg, s, end)
esc2 = s[end + 7:end + 11]
if len(esc2) != 4:
raise JSONDecodeError(msg, s, end)
uni2 = int(esc2, 16)
uni = 0x10000 + (((uni - 0xd800) << 10) | (uni2 - 0xdc00))
next_end += 6
char = unichr(uni)
end = next_end
# Append the unescaped character
_append(char)
return u''.join(chunks), end
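# Worked example (hypothetical input): py_scanstring('"ab\\n"', 1) consumes
# 'ab', decodes the \n escape, and returns (u'ab\n', 6).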
# Use speedup if available
scanstring = c_scanstring or py_scanstring
WHITESPACE = re.compile(r'[ \t\n\r]*', FLAGS)
WHITESPACE_STR = ' \t\n\r'
def JSONObject((s, end), encoding, strict, scan_once, object_hook,
object_pairs_hook, memo=None,
_w=WHITESPACE.match, _ws=WHITESPACE_STR):
# Backwards compatibility
if memo is None:
memo = {}
memo_get = memo.setdefault
pairs = []
# Use a slice to prevent IndexError from being raised, the following
# check will raise a more specific ValueError if the string is empty
nextchar = s[end:end + 1]
# Normally we expect nextchar == '"'
if nextchar != '"':
if nextchar in _ws:
end = _w(s, end).end()
nextchar = s[end:end + 1]
# Trivial empty object
if nextchar == '}':
if object_pairs_hook is not None:
result = object_pairs_hook(pairs)
return result, end + 1
pairs = {}
if object_hook is not None:
pairs = object_hook(pairs)
return pairs, end + 1
elif nextchar != '"':
raise JSONDecodeError("Expecting property name", s, end)
end += 1
while True:
key, end = scanstring(s, end, encoding, strict)
key = memo_get(key, key)
# To skip some function call overhead we optimize the fast paths where
# the JSON key separator is ": " or just ":".
if s[end:end + 1] != ':':
end = _w(s, end).end()
if s[end:end + 1] != ':':
raise JSONDecodeError("Expecting : delimiter", s, end)
end += 1
try:
if s[end] in _ws:
end += 1
if s[end] in _ws:
end = _w(s, end + 1).end()
except IndexError:
pass
try:
value, end = scan_once(s, end)
except StopIteration:
raise JSONDecodeError("Expecting object", s, end)
pairs.append((key, value))
try:
nextchar = s[end]
if nextchar in _ws:
end = _w(s, end + 1).end()
nextchar = s[end]
except IndexError:
nextchar = ''
end += 1
if nextchar == '}':
break
elif nextchar != ',':
raise JSONDecodeError("Expecting , delimiter", s, end - 1)
try:
nextchar = s[end]
if nextchar in _ws:
end += 1
nextchar = s[end]
if nextchar in _ws:
end = _w(s, end + 1).end()
nextchar = s[end]
except IndexError:
nextchar = ''
end += 1
if nextchar != '"':
raise JSONDecodeError("Expecting property name", s, end - 1)
if object_pairs_hook is not None:
result = object_pairs_hook(pairs)
return result, end
pairs = dict(pairs)
if object_hook is not None:
pairs = object_hook(pairs)
return pairs, end
def JSONArray((s, end), scan_once, _w=WHITESPACE.match, _ws=WHITESPACE_STR):
values = []
nextchar = s[end:end + 1]
if nextchar in _ws:
end = _w(s, end + 1).end()
nextchar = s[end:end + 1]
# Look-ahead for trivial empty array
if nextchar == ']':
return values, end + 1
_append = values.append
while True:
try:
value, end = scan_once(s, end)
except StopIteration:
raise JSONDecodeError("Expecting object", s, end)
_append(value)
nextchar = s[end:end + 1]
if nextchar in _ws:
end = _w(s, end + 1).end()
nextchar = s[end:end + 1]
end += 1
if nextchar == ']':
break
elif nextchar != ',':
raise JSONDecodeError("Expecting , delimiter", s, end)
try:
if s[end] in _ws:
end += 1
if s[end] in _ws:
end = _w(s, end + 1).end()
except IndexError:
pass
return values, end
class JSONDecoder(object):
"""Simple JSON <http://json.org> decoder
Performs the following translations in decoding by default:
+---------------+-------------------+
| JSON | Python |
+===============+===================+
| object | dict |
+---------------+-------------------+
| array | list |
+---------------+-------------------+
| string | unicode |
+---------------+-------------------+
| number (int) | int, long |
+---------------+-------------------+
| number (real) | float |
+---------------+-------------------+
| true | True |
+---------------+-------------------+
| false | False |
+---------------+-------------------+
| null | None |
+---------------+-------------------+
It also understands ``NaN``, ``Infinity``, and ``-Infinity`` as
their corresponding ``float`` values, which is outside the JSON spec.
"""
def __init__(self, encoding=None, object_hook=None, parse_float=None,
parse_int=None, parse_constant=None, strict=True,
object_pairs_hook=None):
"""
*encoding* determines the encoding used to interpret any
:class:`str` objects decoded by this instance (``'utf-8'`` by
default). It has no effect when decoding :class:`unicode` objects.
Note that currently only encodings that are a superset of ASCII work,
strings of other encodings should be passed in as :class:`unicode`.
*object_hook*, if specified, will be called with the result of every
JSON object decoded and its return value will be used in place of the
given :class:`dict`. This can be used to provide custom
deserializations (e.g. to support JSON-RPC class hinting).
*object_pairs_hook* is an optional function that will be called with
the result of any object literal decode with an ordered list of pairs.
The return value of *object_pairs_hook* will be used instead of the
:class:`dict`. This feature can be used to implement custom decoders
that rely on the order that the key and value pairs are decoded (for
example, :func:`collections.OrderedDict` will remember the order of
insertion). If *object_hook* is also defined, the *object_pairs_hook*
takes priority.
*parse_float*, if specified, will be called with the string of every
JSON float to be decoded. By default, this is equivalent to
``float(num_str)``. This can be used to use another datatype or parser
for JSON floats (e.g. :class:`decimal.Decimal`).
*parse_int*, if specified, will be called with the string of every
JSON int to be decoded. By default, this is equivalent to
``int(num_str)``. This can be used to use another datatype or parser
for JSON integers (e.g. :class:`float`).
*parse_constant*, if specified, will be called with one of the
following strings: ``'-Infinity'``, ``'Infinity'``, ``'NaN'``. This
can be used to raise an exception if invalid JSON numbers are
encountered.
*strict* controls the parser's behavior when it encounters an
invalid control character in a string. The default setting of
``True`` means that unescaped control characters are parse errors, if
``False`` then control characters will be allowed in strings.
"""
self.encoding = encoding
self.object_hook = object_hook
self.object_pairs_hook = object_pairs_hook
self.parse_float = parse_float or float
self.parse_int = parse_int or int
self.parse_constant = parse_constant or _CONSTANTS.__getitem__
self.strict = strict
self.parse_object = JSONObject
self.parse_array = JSONArray
self.parse_string = scanstring
self.memo = {}
self.scan_once = make_scanner(self)
def decode(self, s, _w=WHITESPACE.match):
"""Return the Python representation of ``s`` (a ``str`` or ``unicode``
instance containing a JSON document)
"""
obj, end = self.raw_decode(s, idx=_w(s, 0).end())
end = _w(s, end).end()
if end != len(s):
raise JSONDecodeError("Extra data", s, end, len(s))
return obj
def raw_decode(self, s, idx=0):
"""Decode a JSON document from ``s`` (a ``str`` or ``unicode``
beginning with a JSON document) and return a 2-tuple of the Python
representation and the index in ``s`` where the document ended.
This can be used to decode a JSON document from a string that may
have extraneous data at the end.
"""
try:
obj, end = self.scan_once(s, idx)
except StopIteration:
raise JSONDecodeError("No JSON object could be decoded", s, idx)
return obj, end
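# Usage sketch (indicative; unicode reprs shown are Python 2 style):
#   >>> JSONDecoder().decode('{"a": [1, 2.5, null]}')
#   {u'a': [1, 2.5, None]}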
| Python |
"""Drop-in replacement for collections.OrderedDict by Raymond Hettinger
http://code.activestate.com/recipes/576693/
"""
from UserDict import DictMixin
# Modified from original to support Python 2.4, see
# http://code.google.com/p/simplejson/issues/detail?id=53
try:
all
except NameError:
def all(seq):
for elem in seq:
if not elem:
return False
return True
class OrderedDict(dict, DictMixin):
def __init__(self, *args, **kwds):
if len(args) > 1:
raise TypeError('expected at most 1 arguments, got %d' % len(args))
try:
self.__end
except AttributeError:
self.clear()
self.update(*args, **kwds)
def clear(self):
self.__end = end = []
end += [None, end, end] # sentinel node for doubly linked list
self.__map = {} # key --> [key, prev, next]
dict.clear(self)
def __setitem__(self, key, value):
if key not in self:
end = self.__end
curr = end[1]
curr[2] = end[1] = self.__map[key] = [key, curr, end]
dict.__setitem__(self, key, value)
def __delitem__(self, key):
dict.__delitem__(self, key)
key, prev, next = self.__map.pop(key)
prev[2] = next
next[1] = prev
def __iter__(self):
end = self.__end
curr = end[2]
while curr is not end:
yield curr[0]
curr = curr[2]
def __reversed__(self):
end = self.__end
curr = end[1]
while curr is not end:
yield curr[0]
curr = curr[1]
def popitem(self, last=True):
if not self:
raise KeyError('dictionary is empty')
# Modified from original to support Python 2.4, see
# http://code.google.com/p/simplejson/issues/detail?id=53
if last:
key = reversed(self).next()
else:
key = iter(self).next()
value = self.pop(key)
return key, value
def __reduce__(self):
items = [[k, self[k]] for k in self]
tmp = self.__map, self.__end
del self.__map, self.__end
inst_dict = vars(self).copy()
self.__map, self.__end = tmp
if inst_dict:
return (self.__class__, (items,), inst_dict)
return self.__class__, (items,)
def keys(self):
return list(self)
setdefault = DictMixin.setdefault
update = DictMixin.update
pop = DictMixin.pop
values = DictMixin.values
items = DictMixin.items
iterkeys = DictMixin.iterkeys
itervalues = DictMixin.itervalues
iteritems = DictMixin.iteritems
def __repr__(self):
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, self.items())
def copy(self):
return self.__class__(self)
@classmethod
def fromkeys(cls, iterable, value=None):
d = cls()
for key in iterable:
d[key] = value
return d
def __eq__(self, other):
if isinstance(other, OrderedDict):
return len(self)==len(other) and \
all(p==q for p, q in zip(self.items(), other.items()))
return dict.__eq__(self, other)
def __ne__(self, other):
return not self == other
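def _demo_ordered_dict():
    # Small self-check of the recipe above (not part of the original
    # module): insertion order survives deletion, and popitem() is LIFO
    # unless called with last=False.
    d = OrderedDict()
    d['b'] = 1
    d['a'] = 2
    d['c'] = 3
    del d['a']
    assert d.keys() == ['b', 'c']
    assert d.popitem() == ('c', 3)
    assert d.popitem(last=False) == ('b', 1)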
| Python |
"""JSON token scanner
"""
import re
def _import_c_make_scanner():
try:
        raise ImportError  # force the pure-Python fallback; _speedups assumes simplejson is on sys.path
from simplejson._speedups import make_scanner
return make_scanner
except ImportError:
return None
c_make_scanner = _import_c_make_scanner()
__all__ = ['make_scanner']
NUMBER_RE = re.compile(
r'(-?(?:0|[1-9]\d*))(\.\d+)?([eE][-+]?\d+)?',
(re.VERBOSE | re.MULTILINE | re.DOTALL))
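# e.g. NUMBER_RE.match('-12.5e3').groups() -> ('-12', '.5', 'e3')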
def py_make_scanner(context):
parse_object = context.parse_object
parse_array = context.parse_array
parse_string = context.parse_string
match_number = NUMBER_RE.match
encoding = context.encoding
strict = context.strict
parse_float = context.parse_float
parse_int = context.parse_int
parse_constant = context.parse_constant
object_hook = context.object_hook
object_pairs_hook = context.object_pairs_hook
memo = context.memo
def _scan_once(string, idx):
try:
nextchar = string[idx]
except IndexError:
raise StopIteration
if nextchar == '"':
return parse_string(string, idx + 1, encoding, strict)
elif nextchar == '{':
return parse_object((string, idx + 1), encoding, strict,
_scan_once, object_hook, object_pairs_hook, memo)
elif nextchar == '[':
return parse_array((string, idx + 1), _scan_once)
elif nextchar == 'n' and string[idx:idx + 4] == 'null':
return None, idx + 4
elif nextchar == 't' and string[idx:idx + 4] == 'true':
return True, idx + 4
elif nextchar == 'f' and string[idx:idx + 5] == 'false':
return False, idx + 5
m = match_number(string, idx)
if m is not None:
integer, frac, exp = m.groups()
if frac or exp:
res = parse_float(integer + (frac or '') + (exp or ''))
else:
res = parse_int(integer)
return res, m.end()
elif nextchar == 'N' and string[idx:idx + 3] == 'NaN':
return parse_constant('NaN'), idx + 3
elif nextchar == 'I' and string[idx:idx + 8] == 'Infinity':
return parse_constant('Infinity'), idx + 8
elif nextchar == '-' and string[idx:idx + 9] == '-Infinity':
return parse_constant('-Infinity'), idx + 9
else:
raise StopIteration
def scan_once(string, idx):
try:
return _scan_once(string, idx)
finally:
memo.clear()
return scan_once
make_scanner = c_make_scanner or py_make_scanner
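# Usage sketch (hypothetical; JSONDecoder builds its scanner this way):
#   scan = py_make_scanner(decoder)   # any object exposing the attrs above
#   scan('[1, true]', 0)  ->  ([1, True], 9)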
| Python |
"""Implementation of JSONEncoder
"""
import re
from decimal import Decimal
def _import_speedups():
try:
        raise ImportError  # force the pure-Python fallback; _speedups assumes simplejson is on sys.path
from simplejson import _speedups
return _speedups.encode_basestring_ascii, _speedups.make_encoder
except ImportError:
return None, None
c_encode_basestring_ascii, c_make_encoder = _import_speedups()
from decoder import PosInf
ESCAPE = re.compile(r'[\x00-\x1f\\"\b\f\n\r\t]')
ESCAPE_ASCII = re.compile(r'([\\"]|[^\ -~])')
HAS_UTF8 = re.compile(r'[\x80-\xff]')
ESCAPE_DCT = {
'\\': '\\\\',
'"': '\\"',
'\b': '\\b',
'\f': '\\f',
'\n': '\\n',
'\r': '\\r',
'\t': '\\t',
}
for i in range(0x20):
#ESCAPE_DCT.setdefault(chr(i), '\\u{0:04x}'.format(i))
ESCAPE_DCT.setdefault(chr(i), '\\u%04x' % (i,))
FLOAT_REPR = repr
def encode_basestring(s):
"""Return a JSON representation of a Python string
"""
if isinstance(s, str) and HAS_UTF8.search(s) is not None:
s = s.decode('utf-8')
def replace(match):
return ESCAPE_DCT[match.group(0)]
return u'"' + ESCAPE.sub(replace, s) + u'"'
def py_encode_basestring_ascii(s):
"""Return an ASCII-only JSON representation of a Python string
"""
if isinstance(s, str) and HAS_UTF8.search(s) is not None:
s = s.decode('utf-8')
def replace(match):
s = match.group(0)
try:
return ESCAPE_DCT[s]
except KeyError:
n = ord(s)
if n < 0x10000:
#return '\\u{0:04x}'.format(n)
return '\\u%04x' % (n,)
else:
# surrogate pair
n -= 0x10000
s1 = 0xd800 | ((n >> 10) & 0x3ff)
s2 = 0xdc00 | (n & 0x3ff)
#return '\\u{0:04x}\\u{1:04x}'.format(s1, s2)
return '\\u%04x\\u%04x' % (s1, s2)
return '"' + str(ESCAPE_ASCII.sub(replace, s)) + '"'
encode_basestring_ascii = (
c_encode_basestring_ascii or py_encode_basestring_ascii)
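# e.g. py_encode_basestring_ascii(u'caf\xe9') == '"caf\\u00e9"'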
class JSONEncoder(object):
"""Extensible JSON <http://json.org> encoder for Python data structures.
Supports the following objects and types by default:
+-------------------+---------------+
| Python | JSON |
+===================+===============+
| dict | object |
+-------------------+---------------+
| list, tuple | array |
+-------------------+---------------+
| str, unicode | string |
+-------------------+---------------+
| int, long, float | number |
+-------------------+---------------+
| True | true |
+-------------------+---------------+
| False | false |
+-------------------+---------------+
| None | null |
+-------------------+---------------+
    To extend this to recognize other objects, subclass and implement a
    ``.default()`` method that returns a serializable object for ``o`` if
    possible; otherwise it should call the superclass implementation (to
    raise ``TypeError``).
"""
item_separator = ', '
key_separator = ': '
def __init__(self, skipkeys=False, ensure_ascii=True,
check_circular=True, allow_nan=True, sort_keys=False,
indent=None, separators=None, encoding='utf-8', default=None,
use_decimal=False):
"""Constructor for JSONEncoder, with sensible defaults.
If skipkeys is false, then it is a TypeError to attempt
encoding of keys that are not str, int, long, float or None. If
skipkeys is True, such items are simply skipped.
If ensure_ascii is true, the output is guaranteed to be str
objects with all incoming unicode characters escaped. If
    ensure_ascii is false, the output will be a unicode object.
If check_circular is true, then lists, dicts, and custom encoded
objects will be checked for circular references during encoding to
prevent an infinite recursion (which would cause an OverflowError).
Otherwise, no such check takes place.
If allow_nan is true, then NaN, Infinity, and -Infinity will be
encoded as such. This behavior is not JSON specification compliant,
but is consistent with most JavaScript based encoders and decoders.
Otherwise, it will be a ValueError to encode such floats.
If sort_keys is true, then the output of dictionaries will be
sorted by key; this is useful for regression tests to ensure
that JSON serializations can be compared on a day-to-day basis.
If indent is a string, then JSON array elements and object members
will be pretty-printed with a newline followed by that string repeated
for each level of nesting. ``None`` (the default) selects the most compact
representation without any newlines. For backwards compatibility with
versions of simplejson earlier than 2.1.0, an integer is also accepted
and is converted to a string with that many spaces.
If specified, separators should be a (item_separator, key_separator)
tuple. The default is (', ', ': '). To get the most compact JSON
representation you should specify (',', ':') to eliminate whitespace.
If specified, default is a function that gets called for objects
that can't otherwise be serialized. It should return a JSON encodable
version of the object or raise a ``TypeError``.
If encoding is not None, then all input strings will be
transformed into unicode using that encoding prior to JSON-encoding.
The default is UTF-8.
If use_decimal is true (not the default), ``decimal.Decimal`` will
be supported directly by the encoder. For the inverse, decode JSON
with ``parse_float=decimal.Decimal``.
"""
self.skipkeys = skipkeys
self.ensure_ascii = ensure_ascii
self.check_circular = check_circular
self.allow_nan = allow_nan
self.sort_keys = sort_keys
self.use_decimal = use_decimal
if isinstance(indent, (int, long)):
indent = ' ' * indent
self.indent = indent
if separators is not None:
self.item_separator, self.key_separator = separators
if default is not None:
self.default = default
self.encoding = encoding
def default(self, o):
"""Implement this method in a subclass such that it returns
a serializable object for ``o``, or calls the base implementation
(to raise a ``TypeError``).
For example, to support arbitrary iterators, you could
implement default like this::
def default(self, o):
try:
iterable = iter(o)
except TypeError:
pass
else:
return list(iterable)
return JSONEncoder.default(self, o)
"""
raise TypeError(repr(o) + " is not JSON serializable")
def encode(self, o):
"""Return a JSON string representation of a Python data structure.
>>> from simplejson import JSONEncoder
>>> JSONEncoder().encode({"foo": ["bar", "baz"]})
'{"foo": ["bar", "baz"]}'
"""
# This is for extremely simple cases and benchmarks.
if isinstance(o, basestring):
if isinstance(o, str):
_encoding = self.encoding
if (_encoding is not None
and not (_encoding == 'utf-8')):
o = o.decode(_encoding)
if self.ensure_ascii:
return encode_basestring_ascii(o)
else:
return encode_basestring(o)
# This doesn't pass the iterator directly to ''.join() because the
# exceptions aren't as detailed. The list call should be roughly
# equivalent to the PySequence_Fast that ''.join() would do.
chunks = self.iterencode(o, _one_shot=True)
if not isinstance(chunks, (list, tuple)):
chunks = list(chunks)
if self.ensure_ascii:
return ''.join(chunks)
else:
return u''.join(chunks)
def iterencode(self, o, _one_shot=False):
"""Encode the given object and yield each string
representation as available.
For example::
for chunk in JSONEncoder().iterencode(bigobject):
mysocket.write(chunk)
"""
if self.check_circular:
markers = {}
else:
markers = None
if self.ensure_ascii:
_encoder = encode_basestring_ascii
else:
_encoder = encode_basestring
if self.encoding != 'utf-8':
def _encoder(o, _orig_encoder=_encoder, _encoding=self.encoding):
if isinstance(o, str):
o = o.decode(_encoding)
return _orig_encoder(o)
def floatstr(o, allow_nan=self.allow_nan,
_repr=FLOAT_REPR, _inf=PosInf, _neginf=-PosInf):
# Check for specials. Note that this type of test is processor
# and/or platform-specific, so do tests which don't depend on
# the internals.
if o != o:
text = 'NaN'
elif o == _inf:
text = 'Infinity'
elif o == _neginf:
text = '-Infinity'
else:
return _repr(o)
if not allow_nan:
raise ValueError(
"Out of range float values are not JSON compliant: " +
repr(o))
return text
key_memo = {}
if (_one_shot and c_make_encoder is not None
and self.indent is None):
_iterencode = c_make_encoder(
markers, self.default, _encoder, self.indent,
self.key_separator, self.item_separator, self.sort_keys,
self.skipkeys, self.allow_nan, key_memo, self.use_decimal)
else:
_iterencode = _make_iterencode(
markers, self.default, _encoder, self.indent, floatstr,
self.key_separator, self.item_separator, self.sort_keys,
self.skipkeys, _one_shot, self.use_decimal)
try:
return _iterencode(o, 0)
finally:
key_memo.clear()
class JSONEncoderForHTML(JSONEncoder):
"""An encoder that produces JSON safe to embed in HTML.
To embed JSON content in, say, a script tag on a web page, the
characters &, < and > should be escaped. They cannot be escaped
    with the usual entities (e.g. ``&amp;``) because they are not expanded
within <script> tags.
"""
def encode(self, o):
# Override JSONEncoder.encode because it has hacks for
# performance that make things more complicated.
chunks = self.iterencode(o, True)
if self.ensure_ascii:
return ''.join(chunks)
else:
return u''.join(chunks)
def iterencode(self, o, _one_shot=False):
chunks = super(JSONEncoderForHTML, self).iterencode(o, _one_shot)
for chunk in chunks:
chunk = chunk.replace('&', '\\u0026')
chunk = chunk.replace('<', '\\u003c')
chunk = chunk.replace('>', '\\u003e')
yield chunk
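# Example: JSONEncoderForHTML().encode({'x': '</script>'}) yields
# '{"x": "\u003c/script\u003e"}', which can be inlined inside <script> tags.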
def _make_iterencode(markers, _default, _encoder, _indent, _floatstr,
_key_separator, _item_separator, _sort_keys, _skipkeys, _one_shot,
_use_decimal,
## HACK: hand-optimized bytecode; turn globals into locals
False=False,
True=True,
ValueError=ValueError,
basestring=basestring,
Decimal=Decimal,
dict=dict,
float=float,
id=id,
int=int,
isinstance=isinstance,
list=list,
long=long,
str=str,
tuple=tuple,
):
def _iterencode_list(lst, _current_indent_level):
if not lst:
yield '[]'
return
if markers is not None:
markerid = id(lst)
if markerid in markers:
raise ValueError("Circular reference detected")
markers[markerid] = lst
buf = '['
if _indent is not None:
_current_indent_level += 1
newline_indent = '\n' + (_indent * _current_indent_level)
separator = _item_separator + newline_indent
buf += newline_indent
else:
newline_indent = None
separator = _item_separator
first = True
for value in lst:
if first:
first = False
else:
buf = separator
if isinstance(value, basestring):
yield buf + _encoder(value)
elif value is None:
yield buf + 'null'
elif value is True:
yield buf + 'true'
elif value is False:
yield buf + 'false'
elif isinstance(value, (int, long)):
yield buf + str(value)
elif isinstance(value, float):
yield buf + _floatstr(value)
elif _use_decimal and isinstance(value, Decimal):
yield buf + str(value)
else:
yield buf
if isinstance(value, (list, tuple)):
chunks = _iterencode_list(value, _current_indent_level)
elif isinstance(value, dict):
chunks = _iterencode_dict(value, _current_indent_level)
else:
chunks = _iterencode(value, _current_indent_level)
for chunk in chunks:
yield chunk
if newline_indent is not None:
_current_indent_level -= 1
yield '\n' + (_indent * _current_indent_level)
yield ']'
if markers is not None:
del markers[markerid]
def _iterencode_dict(dct, _current_indent_level):
if not dct:
yield '{}'
return
if markers is not None:
markerid = id(dct)
if markerid in markers:
raise ValueError("Circular reference detected")
markers[markerid] = dct
yield '{'
if _indent is not None:
_current_indent_level += 1
newline_indent = '\n' + (_indent * _current_indent_level)
item_separator = _item_separator + newline_indent
yield newline_indent
else:
newline_indent = None
item_separator = _item_separator
first = True
if _sort_keys:
items = dct.items()
items.sort(key=lambda kv: kv[0])
else:
items = dct.iteritems()
for key, value in items:
if isinstance(key, basestring):
pass
# JavaScript is weakly typed for these, so it makes sense to
# also allow them. Many encoders seem to do something like this.
elif isinstance(key, float):
key = _floatstr(key)
elif key is True:
key = 'true'
elif key is False:
key = 'false'
elif key is None:
key = 'null'
elif isinstance(key, (int, long)):
key = str(key)
elif _skipkeys:
continue
else:
raise TypeError("key " + repr(key) + " is not a string")
if first:
first = False
else:
yield item_separator
yield _encoder(key)
yield _key_separator
if isinstance(value, basestring):
yield _encoder(value)
elif value is None:
yield 'null'
elif value is True:
yield 'true'
elif value is False:
yield 'false'
elif isinstance(value, (int, long)):
yield str(value)
elif isinstance(value, float):
yield _floatstr(value)
elif _use_decimal and isinstance(value, Decimal):
yield str(value)
else:
if isinstance(value, (list, tuple)):
chunks = _iterencode_list(value, _current_indent_level)
elif isinstance(value, dict):
chunks = _iterencode_dict(value, _current_indent_level)
else:
chunks = _iterencode(value, _current_indent_level)
for chunk in chunks:
yield chunk
if newline_indent is not None:
_current_indent_level -= 1
yield '\n' + (_indent * _current_indent_level)
yield '}'
if markers is not None:
del markers[markerid]
def _iterencode(o, _current_indent_level):
if isinstance(o, basestring):
yield _encoder(o)
elif o is None:
yield 'null'
elif o is True:
yield 'true'
elif o is False:
yield 'false'
elif isinstance(o, (int, long)):
yield str(o)
elif isinstance(o, float):
yield _floatstr(o)
elif isinstance(o, (list, tuple)):
for chunk in _iterencode_list(o, _current_indent_level):
yield chunk
elif isinstance(o, dict):
for chunk in _iterencode_dict(o, _current_indent_level):
yield chunk
elif _use_decimal and isinstance(o, Decimal):
yield str(o)
else:
if markers is not None:
markerid = id(o)
if markerid in markers:
raise ValueError("Circular reference detected")
markers[markerid] = o
o = _default(o)
for chunk in _iterencode(o, _current_indent_level):
yield chunk
if markers is not None:
del markers[markerid]
return _iterencode
| Python |
r"""JSON (JavaScript Object Notation) <http://json.org> is a subset of
JavaScript syntax (ECMA-262 3rd edition) used as a lightweight data
interchange format.
:mod:`simplejson` exposes an API familiar to users of the standard library
:mod:`marshal` and :mod:`pickle` modules. It is the externally maintained
version of the :mod:`json` library contained in Python 2.6, but maintains
compatibility with Python 2.4 and Python 2.5 and (currently) has
significant performance advantages, even without using the optional C
extension for speedups.
Encoding basic Python object hierarchies::
>>> import simplejson as json
>>> json.dumps(['foo', {'bar': ('baz', None, 1.0, 2)}])
'["foo", {"bar": ["baz", null, 1.0, 2]}]'
>>> print json.dumps("\"foo\bar")
"\"foo\bar"
>>> print json.dumps(u'\u1234')
"\u1234"
>>> print json.dumps('\\')
"\\"
>>> print json.dumps({"c": 0, "b": 0, "a": 0}, sort_keys=True)
{"a": 0, "b": 0, "c": 0}
>>> from StringIO import StringIO
>>> io = StringIO()
>>> json.dump(['streaming API'], io)
>>> io.getvalue()
'["streaming API"]'
Compact encoding::
>>> import simplejson as json
>>> json.dumps([1,2,3,{'4': 5, '6': 7}], separators=(',',':'))
'[1,2,3,{"4":5,"6":7}]'
Pretty printing::
>>> import simplejson as json
>>> s = json.dumps({'4': 5, '6': 7}, sort_keys=True, indent=' ')
>>> print '\n'.join([l.rstrip() for l in s.splitlines()])
{
"4": 5,
"6": 7
}
Decoding JSON::
>>> import simplejson as json
>>> obj = [u'foo', {u'bar': [u'baz', None, 1.0, 2]}]
>>> json.loads('["foo", {"bar":["baz", null, 1.0, 2]}]') == obj
True
>>> json.loads('"\\"foo\\bar"') == u'"foo\x08ar'
True
>>> from StringIO import StringIO
>>> io = StringIO('["streaming API"]')
>>> json.load(io)[0] == 'streaming API'
True
Specializing JSON object decoding::
>>> import simplejson as json
>>> def as_complex(dct):
... if '__complex__' in dct:
... return complex(dct['real'], dct['imag'])
... return dct
...
>>> json.loads('{"__complex__": true, "real": 1, "imag": 2}',
... object_hook=as_complex)
(1+2j)
>>> from decimal import Decimal
>>> json.loads('1.1', parse_float=Decimal) == Decimal('1.1')
True
Specializing JSON object encoding::
>>> import simplejson as json
>>> def encode_complex(obj):
... if isinstance(obj, complex):
... return [obj.real, obj.imag]
... raise TypeError(repr(obj) + " is not JSON serializable")
...
>>> json.dumps(2 + 1j, default=encode_complex)
'[2.0, 1.0]'
>>> json.JSONEncoder(default=encode_complex).encode(2 + 1j)
'[2.0, 1.0]'
>>> ''.join(json.JSONEncoder(default=encode_complex).iterencode(2 + 1j))
'[2.0, 1.0]'
Using simplejson.tool from the shell to validate and pretty-print::
$ echo '{"json":"obj"}' | python -m simplejson.tool
{
"json": "obj"
}
$ echo '{ 1.2:3.4}' | python -m simplejson.tool
Expecting property name: line 1 column 2 (char 2)
"""
__version__ = '2.1.3'
__all__ = [
'dump', 'dumps', 'load', 'loads',
'JSONDecoder', 'JSONDecodeError', 'JSONEncoder',
'OrderedDict',
]
__author__ = 'Bob Ippolito <bob@redivi.com>'
from decimal import Decimal
from decoder import JSONDecoder, JSONDecodeError
from encoder import JSONEncoder
def _import_OrderedDict():
import collections
try:
return collections.OrderedDict
except AttributeError:
import ordered_dict
return ordered_dict.OrderedDict
OrderedDict = _import_OrderedDict()
def _import_c_make_encoder():
try:
        raise ImportError  # force the pure-Python fallback; _speedups assumes simplejson is on sys.path
from simplejson._speedups import make_encoder
return make_encoder
except ImportError:
return None
_default_encoder = JSONEncoder(
skipkeys=False,
ensure_ascii=True,
check_circular=True,
allow_nan=True,
indent=None,
separators=None,
encoding='utf-8',
default=None,
use_decimal=False,
)
def dump(obj, fp, skipkeys=False, ensure_ascii=True, check_circular=True,
allow_nan=True, cls=None, indent=None, separators=None,
encoding='utf-8', default=None, use_decimal=False, **kw):
"""Serialize ``obj`` as a JSON formatted stream to ``fp`` (a
``.write()``-supporting file-like object).
If ``skipkeys`` is true then ``dict`` keys that are not basic types
(``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
will be skipped instead of raising a ``TypeError``.
    If ``ensure_ascii`` is false, then some chunks written to ``fp``
may be ``unicode`` instances, subject to normal Python ``str`` to
``unicode`` coercion rules. Unless ``fp.write()`` explicitly
understands ``unicode`` (as in ``codecs.getwriter()``) this is likely
to cause an error.
If ``check_circular`` is false, then the circular reference check
for container types will be skipped and a circular reference will
result in an ``OverflowError`` (or worse).
If ``allow_nan`` is false, then it will be a ``ValueError`` to
serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``)
in strict compliance of the JSON specification, instead of using the
JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).
If *indent* is a string, then JSON array elements and object members
will be pretty-printed with a newline followed by that string repeated
for each level of nesting. ``None`` (the default) selects the most compact
representation without any newlines. For backwards compatibility with
versions of simplejson earlier than 2.1.0, an integer is also accepted
and is converted to a string with that many spaces.
If ``separators`` is an ``(item_separator, dict_separator)`` tuple
then it will be used instead of the default ``(', ', ': ')`` separators.
``(',', ':')`` is the most compact JSON representation.
``encoding`` is the character encoding for str instances, default is UTF-8.
``default(obj)`` is a function that should return a serializable version
of obj or raise TypeError. The default simply raises TypeError.
If *use_decimal* is true (default: ``False``) then decimal.Decimal
will be natively serialized to JSON with full precision.
To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
``.default()`` method to serialize additional types), specify it with
the ``cls`` kwarg.
"""
# cached encoder
if (not skipkeys and ensure_ascii and
check_circular and allow_nan and
cls is None and indent is None and separators is None and
encoding == 'utf-8' and default is None and not use_decimal
and not kw):
iterable = _default_encoder.iterencode(obj)
else:
if cls is None:
cls = JSONEncoder
iterable = cls(skipkeys=skipkeys, ensure_ascii=ensure_ascii,
check_circular=check_circular, allow_nan=allow_nan, indent=indent,
separators=separators, encoding=encoding,
default=default, use_decimal=use_decimal, **kw).iterencode(obj)
# could accelerate with writelines in some versions of Python, at
# a debuggability cost
for chunk in iterable:
fp.write(chunk)
def dumps(obj, skipkeys=False, ensure_ascii=True, check_circular=True,
allow_nan=True, cls=None, indent=None, separators=None,
encoding='utf-8', default=None, use_decimal=False, **kw):
"""Serialize ``obj`` to a JSON formatted ``str``.
If ``skipkeys`` is false then ``dict`` keys that are not basic types
(``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
will be skipped instead of raising a ``TypeError``.
If ``ensure_ascii`` is false, then the return value will be a
``unicode`` instance subject to normal Python ``str`` to ``unicode``
coercion rules instead of being escaped to an ASCII ``str``.
If ``check_circular`` is false, then the circular reference check
for container types will be skipped and a circular reference will
result in an ``OverflowError`` (or worse).
If ``allow_nan`` is false, then it will be a ``ValueError`` to
serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``) in
strict compliance of the JSON specification, instead of using the
JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).
If ``indent`` is a string, then JSON array elements and object members
will be pretty-printed with a newline followed by that string repeated
for each level of nesting. ``None`` (the default) selects the most compact
representation without any newlines. For backwards compatibility with
versions of simplejson earlier than 2.1.0, an integer is also accepted
and is converted to a string with that many spaces.
If ``separators`` is an ``(item_separator, dict_separator)`` tuple
then it will be used instead of the default ``(', ', ': ')`` separators.
``(',', ':')`` is the most compact JSON representation.
``encoding`` is the character encoding for str instances, default is UTF-8.
``default(obj)`` is a function that should return a serializable version
of obj or raise TypeError. The default simply raises TypeError.
If *use_decimal* is true (default: ``False``) then decimal.Decimal
will be natively serialized to JSON with full precision.
To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
``.default()`` method to serialize additional types), specify it with
the ``cls`` kwarg.
"""
# cached encoder
if (not skipkeys and ensure_ascii and
check_circular and allow_nan and
cls is None and indent is None and separators is None and
encoding == 'utf-8' and default is None and not use_decimal
and not kw):
return _default_encoder.encode(obj)
if cls is None:
cls = JSONEncoder
return cls(
skipkeys=skipkeys, ensure_ascii=ensure_ascii,
check_circular=check_circular, allow_nan=allow_nan, indent=indent,
separators=separators, encoding=encoding, default=default,
use_decimal=use_decimal, **kw).encode(obj)
_default_decoder = JSONDecoder(encoding=None, object_hook=None,
object_pairs_hook=None)
def load(fp, encoding=None, cls=None, object_hook=None, parse_float=None,
parse_int=None, parse_constant=None, object_pairs_hook=None,
use_decimal=False, **kw):
"""Deserialize ``fp`` (a ``.read()``-supporting file-like object containing
a JSON document) to a Python object.
*encoding* determines the encoding used to interpret any
:class:`str` objects decoded by this instance (``'utf-8'`` by
default). It has no effect when decoding :class:`unicode` objects.
Note that currently only encodings that are a superset of ASCII work,
strings of other encodings should be passed in as :class:`unicode`.
*object_hook*, if specified, will be called with the result of every
JSON object decoded and its return value will be used in place of the
given :class:`dict`. This can be used to provide custom
deserializations (e.g. to support JSON-RPC class hinting).
*object_pairs_hook* is an optional function that will be called with
the result of any object literal decode with an ordered list of pairs.
The return value of *object_pairs_hook* will be used instead of the
:class:`dict`. This feature can be used to implement custom decoders
that rely on the order that the key and value pairs are decoded (for
example, :func:`collections.OrderedDict` will remember the order of
insertion). If *object_hook* is also defined, the *object_pairs_hook*
takes priority.
*parse_float*, if specified, will be called with the string of every
JSON float to be decoded. By default, this is equivalent to
``float(num_str)``. This can be used to use another datatype or parser
for JSON floats (e.g. :class:`decimal.Decimal`).
*parse_int*, if specified, will be called with the string of every
JSON int to be decoded. By default, this is equivalent to
``int(num_str)``. This can be used to use another datatype or parser
for JSON integers (e.g. :class:`float`).
*parse_constant*, if specified, will be called with one of the
following strings: ``'-Infinity'``, ``'Infinity'``, ``'NaN'``. This
can be used to raise an exception if invalid JSON numbers are
encountered.
If *use_decimal* is true (default: ``False``) then it implies
parse_float=decimal.Decimal for parity with ``dump``.
To use a custom ``JSONDecoder`` subclass, specify it with the ``cls``
kwarg.
"""
return loads(fp.read(),
encoding=encoding, cls=cls, object_hook=object_hook,
parse_float=parse_float, parse_int=parse_int,
parse_constant=parse_constant, object_pairs_hook=object_pairs_hook,
use_decimal=use_decimal, **kw)
def loads(s, encoding=None, cls=None, object_hook=None, parse_float=None,
parse_int=None, parse_constant=None, object_pairs_hook=None,
use_decimal=False, **kw):
"""Deserialize ``s`` (a ``str`` or ``unicode`` instance containing a JSON
document) to a Python object.
*encoding* determines the encoding used to interpret any
:class:`str` objects decoded by this instance (``'utf-8'`` by
default). It has no effect when decoding :class:`unicode` objects.
Note that currently only encodings that are a superset of ASCII work,
strings of other encodings should be passed in as :class:`unicode`.
*object_hook*, if specified, will be called with the result of every
JSON object decoded and its return value will be used in place of the
given :class:`dict`. This can be used to provide custom
deserializations (e.g. to support JSON-RPC class hinting).
*object_pairs_hook* is an optional function that will be called with
the result of any object literal decode with an ordered list of pairs.
The return value of *object_pairs_hook* will be used instead of the
:class:`dict`. This feature can be used to implement custom decoders
that rely on the order that the key and value pairs are decoded (for
example, :func:`collections.OrderedDict` will remember the order of
insertion). If *object_hook* is also defined, the *object_pairs_hook*
takes priority.
*parse_float*, if specified, will be called with the string of every
JSON float to be decoded. By default, this is equivalent to
``float(num_str)``. This can be used to use another datatype or parser
for JSON floats (e.g. :class:`decimal.Decimal`).
*parse_int*, if specified, will be called with the string of every
JSON int to be decoded. By default, this is equivalent to
``int(num_str)``. This can be used to use another datatype or parser
for JSON integers (e.g. :class:`float`).
*parse_constant*, if specified, will be called with one of the
following strings: ``'-Infinity'``, ``'Infinity'``, ``'NaN'``. This
can be used to raise an exception if invalid JSON numbers are
encountered.
If *use_decimal* is true (default: ``False``) then it implies
``parse_float=decimal.Decimal``, for parity with ``dump``.
To use a custom ``JSONDecoder`` subclass, specify it with the ``cls``
kwarg.
"""
if (cls is None and encoding is None and object_hook is None and
parse_int is None and parse_float is None and
parse_constant is None and object_pairs_hook is None
and not use_decimal and not kw):
return _default_decoder.decode(s)
if cls is None:
cls = JSONDecoder
if object_hook is not None:
kw['object_hook'] = object_hook
if object_pairs_hook is not None:
kw['object_pairs_hook'] = object_pairs_hook
if parse_float is not None:
kw['parse_float'] = parse_float
if parse_int is not None:
kw['parse_int'] = parse_int
if parse_constant is not None:
kw['parse_constant'] = parse_constant
if use_decimal:
if parse_float is not None:
raise TypeError("use_decimal=True implies parse_float=Decimal")
kw['parse_float'] = Decimal
return cls(encoding=encoding, **kw).decode(s)
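# A minimal usage sketch (illustrative, not part of the module API): how the
# decoding hooks compose. Assumes Python 2.7 for collections.OrderedDict.
#
#     >>> import decimal, collections
#     >>> loads('{"a": 1.1}', parse_float=decimal.Decimal)
#     {u'a': Decimal('1.1')}
#     >>> loads('{"b": 1, "a": 2}', object_pairs_hook=collections.OrderedDict)
#     OrderedDict([(u'b', 1), (u'a', 2)])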
def _toggle_speedups(enabled):
import decoder as dec
import encoder as enc
import scanner as scan
c_make_encoder = _import_c_make_encoder()
if enabled:
dec.scanstring = dec.c_scanstring or dec.py_scanstring
enc.c_make_encoder = c_make_encoder
enc.encode_basestring_ascii = (enc.c_encode_basestring_ascii or
enc.py_encode_basestring_ascii)
scan.make_scanner = scan.c_make_scanner or scan.py_make_scanner
else:
dec.scanstring = dec.py_scanstring
enc.c_make_encoder = None
enc.encode_basestring_ascii = enc.py_encode_basestring_ascii
scan.make_scanner = scan.py_make_scanner
dec.make_scanner = scan.make_scanner
global _default_decoder
_default_decoder = JSONDecoder(
encoding=None,
object_hook=None,
object_pairs_hook=None,
)
global _default_encoder
_default_encoder = JSONEncoder(
skipkeys=False,
ensure_ascii=True,
check_circular=True,
allow_nan=True,
indent=None,
separators=None,
encoding='utf-8',
default=None,
)
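# Usage note (an assumption about intent, since _toggle_speedups is private):
# _toggle_speedups(False) swaps in the pure-Python scanner, scanstring, and
# encoder, which is handy when chasing behavior differences against the C
# extensions; _toggle_speedups(True) restores the C versions, falling back to
# the Python implementations where the extension is unavailable. Either way,
# the module-level default decoder and encoder are rebuilt.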
| Python |
r"""Command-line tool to validate and pretty-print JSON
Usage::
$ echo '{"json":"obj"}' | python -m simplejson.tool
{
"json": "obj"
}
$ echo '{ 1.2:3.4}' | python -m simplejson.tool
Expecting property name: line 1 column 2 (char 2)
"""
import sys
import simplejson as json
def main():
if len(sys.argv) == 1:
infile = sys.stdin
outfile = sys.stdout
elif len(sys.argv) == 2:
infile = open(sys.argv[1], 'rb')
outfile = sys.stdout
elif len(sys.argv) == 3:
infile = open(sys.argv[1], 'rb')
outfile = open(sys.argv[2], 'wb')
else:
raise SystemExit(sys.argv[0] + " [infile [outfile]]")
try:
try:
obj = json.load(infile,
object_pairs_hook=json.OrderedDict,
use_decimal=True)
except ValueError, e:
raise SystemExit(e)
json.dump(obj, outfile, sort_keys=True, indent=' ', use_decimal=True)
outfile.write('\n')
finally:
infile.close()
outfile.close()
if __name__ == '__main__':
main()
| Python |
import urllib
import simplejson
class Stripe:
"""
Usage:
key='<api key>'
d = Stripe(key).charge(
amount=100,
currency='usd',
card_number='4242424242424242',
card_exp_month='5',
card_exp_year='2012',
card_cvc_check='123',
description='test charge')
print d
print Stripe(key).check(d['id'])
print Stripe(key).refund(d['id'])
Sample output (python dict):
{u'fee': 0, u'description': u'test charge', u'created': 1321242072, u'refunded': False, u'livemode': False, u'object': u'charge', u'currency': u'usd', u'amount': 100, u'paid': True, u'id': u'ch_sdjasgfga83asf', u'card': {u'exp_month': 5, u'country': u'US', u'object': u'card', u'last4': u'4242', u'exp_year': 2012, u'type': u'Visa'}}
if paid is True then the transaction was processed
"""
def __init__(self, key):
self.key = key
def charge(self,
amount,
currency='usd',
card_number='4242424242424242',
card_exp_month='5',
card_exp_year='2012',
card_cvc_check='123',
description='test charge'):
params = urllib.urlencode({'amount': amount,
'currency': currency,
'card[number]': card_number,
'card[exp_month]': card_exp_month,
'card[exp_year]': card_exp_year,
'card[cvc_check]': card_cvc_check,
'description': description})
u = urllib.urlopen('https://%s:@api.stripe.com/v1/charges' %
self.key, params)
return simplejson.loads(u.read())
def check(self, charge_id):
u = urllib.urlopen('https://%s:@api.stripe.com/v1/charges/%s' %
(self.key, charge_id))
return simplejson.loads(u.read())
def refund(self, charge_id):
params = urllib.urlencode({})
u = urllib.urlopen('https://%s:@api.stripe.com/v1/charges/%s/refund' %
(self.key, charge_id), params)
return simplejson.loads(u.read())
if __name__ == '__main__':
key = raw_input('user>')
d = Stripe(key).charge(100)
print 'charged', d['paid']
s = Stripe(key).check(d[u'id'])
print 'paid', s['paid'], s['amount'], s['currency']
s = Stripe(key).refund(d[u'id'])
print 'refunded', s['refunded']
| Python |
"""
DowCommerce class to process credit card payments with DowCommerce.com
Modifications to support the Dow Commerce API, from code originally written by John Conde
http://www.johnconde.net/blog/integrate-the-authorizenet-aim-api-with-python-3-2/
Unknown license, assuming public domain
Modified by Dave Stoll dave.stoll@gmail.com
- modified to support the Dow Commerce API
"""
__all__ = ['DowCommerce']
from operator import itemgetter
import urllib
class DowCommerce:
class DowCommerceError(Exception):
def __init__(self, value):
self.parameter = value
def __str__(self):
return str(self.parameter)
def __init__(self, username=None, password=None, demomode=False):
if not demomode:
if username is None or str(username).strip() == '':
raise DowCommerce.DowCommerceError('No username provided')
if password is None or str(password).strip() == '':
raise DowCommerce.DowCommerceError('No password provided')
else:
username = 'demo'
password = 'password'
self.proxy = None
self.delimiter = '&'
self.results = {}
self.error = True
self.success = False
self.declined = False
self.url = 'https://secure.dowcommerce.net/api/transact.php'
self.parameters = {}
self.setParameter('username', username)
self.setParameter('password', password)
def process(self):
encoded_args = urllib.urlencode(self.parameters)
if self.proxy is None:
results = str(urllib.urlopen(
self.url, encoded_args).read()).split(self.delimiter)
else:
opener = urllib.FancyURLopener(self.proxy)
opened = opener.open(self.url, encoded_args)
try:
results = str(opened.read()).split(self.delimiter)
finally:
opened.close()
for result in results:
(key, val) = result.split('=')
self.results[key] = val
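# Response-code mapping, as assumed from the branches below and from
# getResultResponseShort(): '1' = approved, '2' = declined, '3' = error.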
if self.results['response'] == '1':
self.error = False
self.success = True
self.declined = False
elif self.results['response'] == '2':
self.error = False
self.success = False
self.declined = True
elif self.results['response'] == '3':
self.error = True
self.success = False
self.declined = False
else:
self.error = True
self.success = False
self.declined = False
raise DowCommerce.DowCommerceError(self.results)
def setTransaction(
self, creditcard, expiration, total, cvv=None, orderid=None, orderdescription=None,
ipaddress=None, tax=None, shipping=None,
firstname=None, lastname=None, company=None, address1=None, address2=None, city=None, state=None, zipcode=None,
country=None, phone=None, fax=None, emailaddress=None, website=None,
shipping_firstname=None, shipping_lastname=None, shipping_company=None, shipping_address1=None, shipping_address2=None,
shipping_city=None, shipping_state=None, shipping_zipcode=None, shipping_country=None, shipping_emailaddress=None):
if creditcard is None or str(creditcard).strip() == '':
raise DowCommerce.DowCommerceError('No credit card number passed to setTransaction(): {0}'.format(creditcard))
if expiration is None or str(expiration).strip() == '':
raise DowCommerce.DowCommerceError('No expiration number passed to setTransaction(): {0}'.format(expiration))
if total is None or str(total).strip() == '':
raise DowCommerce.DowCommerceError('No total amount passed to setTransaction(): {0}'.format(total))
self.setParameter('ccnumber', creditcard)
self.setParameter('ccexp', expiration)
self.setParameter('amount', total)
if cvv:
self.setParameter('cvv', cvv)
if orderid:
self.setParameter('orderid', orderid)
if orderdescription:
self.setParameter('orderdescription', orderdescription)
if ipaddress:
self.setParameter('ipaddress', ipaddress)
if tax:
self.setParameter('tax', tax)
if shipping:
self.setParameter('shipping', shipping)
## billing info
if firstname:
self.setParameter('firstname', firstname)
if lastname:
self.setParameter('lastname', lastname)
if company:
self.setParameter('company', company)
if address1:
self.setParameter('address1', address1)
if address2:
self.setParameter('address2', address2)
if city:
self.setParameter('city', city)
if state:
self.setParameter('state', state)
if zipcode:
self.setParameter('zip', zipcode)
if country:
self.setParameter('country', country)
if phone:
self.setParameter('phone', phone)
if fax:
self.setParameter('fax', fax)
if emailaddress:
self.setParameter('email', emailaddress)
if website:
self.setParameter('website', website)
## shipping info
if shipping_firstname:
self.setParameter('shipping_firstname', shipping_firstname)
if shipping_lastname:
self.setParameter('shipping_lastname', shipping_lastname)
if shipping_company:
self.setParameter('shipping_company', shipping_company)
if shipping_address1:
self.setParameter('shipping_address1', shipping_address1)
if shipping_address2:
self.setParameter('shipping_address2', shipping_address2)
if shipping_city:
self.setParameter('shipping_city', shipping_city)
if shipping_state:
self.setParameter('shipping_state', shipping_state)
if shipping_zipcode:
self.setParameter('shipping_zip', shipping_zipcode)
if shipping_country:
self.setParameter('shipping_country', shipping_country)
def setTransactionType(self, transtype=None):
types = ['sale', 'auth', 'credit']
if transtype is None or transtype.lower() not in types:
raise DowCommerce.DowCommerceError('Incorrect Transaction Type passed to setTransactionType(): {0}'.format(transtype))
self.setParameter('type', transtype.lower())
def setProxy(self, proxy=None):
if proxy is None or str(proxy).strip() == '':
raise DowCommerce.DowCommerceError('No proxy passed to setProxy()')
self.proxy = {'http': str(proxy).strip()}
def setParameter(self, key=None, value=None):
if key is not None and value is not None and str(key).strip() != '' and str(value).strip() != '':
self.parameters[key] = str(value).strip()
else:
raise DowCommerce.DowCommerceError('Incorrect parameters passed to setParameter(): {0}:{1}'.format(key, value))
def isApproved(self):
return self.success
def isDeclined(self):
return self.declined
def isError(self):
return self.error
def getResultResponseShort(self):
responses = ['', 'Approved', 'Declined', 'Error']
return responses[int(self.results['response'])]
def getFullResponse(self):
return self.results
def getResponseText(self):
return self.results['responsetext']
def test():
import socket
import sys
from time import time
## TEST VALUES FROM API DOC:
# Visa: 4111111111111111
# MasterCard 5431111111111111
# DiscoverCard: 6011601160116611
# American Express: 341111111111111
# Expiration: 10/10
# Amount: > 1.00 (( passing less than $1.00 will cause it to be declined ))
# CVV: 999
creditcard = '4111111111111111'
expiration = '1010'
total = '1.00'
cvv = '999'
tax = '0.00'
orderid = str(time())[4:10] # derive a quasi-unique invoice number from the clock
try:
payment = DowCommerce(demomode=True)
payment.setTransaction(
creditcard, expiration, total, cvv=cvv, tax=tax, orderid=orderid, orderdescription='Test Transaction',
firstname='John', lastname='Doe', company='Acme', address1='123 Main Street', city='Hometown', state='VA',
zipcode='12345', country='US', phone='888-555-1212', emailaddress='john@noemail.local', ipaddress='192.168.1.1')
payment.process()
if payment.isApproved():
print 'Payment approved!'
print payment.getFullResponse()
elif payment.isDeclined():
print 'Your credit card was declined by your bank'
elif payment.isError():
raise DowCommerce.DowCommerceError('An uncaught error occurred')
except DowCommerce.DowCommerceError, e:
print "Exception thrown:", e
print 'An error occurred'
print 'approved', payment.isApproved()
print 'declined', payment.isDeclined()
print 'error', payment.isError()
if __name__ == '__main__':
test()
| Python |
#!/usr/bin/env python
# Copyright (c) 2007-2008 ActiveState Corp.
# License: MIT (http://www.opensource.org/licenses/mit-license.php)
r"""A fast and complete Python implementation of Markdown.
[from http://daringfireball.net/projects/markdown/]
> Markdown is a text-to-HTML filter; it translates an easy-to-read /
> easy-to-write structured text format into HTML. Markdown's text
> format is most similar to that of plain text email, and supports
> features such as headers, *emphasis*, code blocks, blockquotes, and
> links.
>
> Markdown's syntax is designed not as a generic markup language, but
> specifically to serve as a front-end to (X)HTML. You can use span-level
> HTML tags anywhere in a Markdown document, and you can use block level
> HTML tags (like <div> and <table>) as well.
Module usage:
>>> import markdown2
>>> markdown2.markdown("*boo!*") # or use `html = markdown_path(PATH)`
u'<p><em>boo!</em></p>\n'
>>> markdowner = Markdown()
>>> markdowner.convert("*boo!*")
u'<p><em>boo!</em></p>\n'
>>> markdowner.convert("**boom!**")
u'<p><strong>boom!</strong></p>\n'
This implementation of Markdown implements the full "core" syntax plus a
number of extras (e.g., code syntax coloring, footnotes) as described on
<http://code.google.com/p/python-markdown2/wiki/Extras>.
"""
cmdln_desc = """A fast and complete Python implementation of Markdown, a
text-to-HTML conversion tool for web writers.
"""
# Dev Notes:
# - There is already a Python markdown processor
# (http://www.freewisdom.org/projects/python-markdown/).
# - Python's regex syntax doesn't have '\z', so I'm using '\Z'. I'm
# not yet sure if there are implications with this. Compare 'pydoc sre'
# and 'perldoc perlre'.
__version_info__ = (1, 0, 1, 16) # first three nums match Markdown.pl
__version__ = '1.0.1.16'
__author__ = "Trent Mick"
import os
import sys
from pprint import pprint
import re
import logging
try:
from hashlib import md5
except ImportError:
from md5 import md5
import optparse
from random import random, randint
import codecs
from urllib import quote
#---- Python version compat
if sys.version_info[:2] < (2,4):
from sets import Set as set
def reversed(sequence):
for i in sequence[::-1]:
yield i
def _unicode_decode(s, encoding, errors='xmlcharrefreplace'):
return unicode(s, encoding, errors)
else:
def _unicode_decode(s, encoding, errors='strict'):
return s.decode(encoding, errors)
#---- globals
DEBUG = False
log = logging.getLogger("markdown")
DEFAULT_TAB_WIDTH = 4
try:
import uuid
except ImportError:
SECRET_SALT = str(randint(0, 1000000))
else:
SECRET_SALT = str(uuid.uuid4())
def _hash_ascii(s):
#return md5(s).hexdigest() # Markdown.pl effectively does this.
return 'md5-' + md5(SECRET_SALT + s).hexdigest()
def _hash_text(s):
return 'md5-' + md5(SECRET_SALT + s.encode("utf-8")).hexdigest()
# Table of hash values for escaped characters:
g_escape_table = dict([(ch, _hash_ascii(ch))
for ch in '\\`*_{}[]()>#+-.!'])
#---- exceptions
class MarkdownError(Exception):
pass
#---- public api
def markdown_path(path, encoding="utf-8",
html4tags=False, tab_width=DEFAULT_TAB_WIDTH,
safe_mode=None, extras=None, link_patterns=None,
use_file_vars=False):
fp = codecs.open(path, 'r', encoding)
try:
text = fp.read()
finally:
fp.close()
return Markdown(html4tags=html4tags, tab_width=tab_width,
safe_mode=safe_mode, extras=extras,
link_patterns=link_patterns,
use_file_vars=use_file_vars).convert(text)
def markdown(text, html4tags=False, tab_width=DEFAULT_TAB_WIDTH,
safe_mode=None, extras=None, link_patterns=None,
use_file_vars=False):
return Markdown(html4tags=html4tags, tab_width=tab_width,
safe_mode=safe_mode, extras=extras,
link_patterns=link_patterns,
use_file_vars=use_file_vars).convert(text)
class Markdown(object):
# The dict of "extras" to enable in processing -- a mapping of
# extra name to argument for the extra. Most extras do not have an
# argument, in which case the value is None.
#
# This can be set via (a) subclassing and (b) the constructor
# "extras" argument.
extras = None
urls = None
titles = None
html_blocks = None
html_spans = None
html_removed_text = "[HTML_REMOVED]" # for compat with markdown.py
# Used to track when we're inside an ordered or unordered list
# (see _ProcessListItems() for details):
list_level = 0
_ws_only_line_re = re.compile(r"^[ \t]+$", re.M)
def __init__(self, html4tags=False, tab_width=4, safe_mode=None,
extras=None, link_patterns=None, use_file_vars=False):
if html4tags:
self.empty_element_suffix = ">"
else:
self.empty_element_suffix = " />"
self.tab_width = tab_width
# For compatibility with earlier markdown2.py and with
# markdown.py's safe_mode being a boolean,
# safe_mode == True -> "replace"
if safe_mode is True:
self.safe_mode = "replace"
else:
self.safe_mode = safe_mode
if self.extras is None:
self.extras = {}
elif not isinstance(self.extras, dict):
self.extras = dict([(e, None) for e in self.extras])
if extras:
if not isinstance(extras, dict):
extras = dict([(e, None) for e in extras])
self.extras.update(extras)
assert isinstance(self.extras, dict)
self._instance_extras = self.extras.copy()
self.link_patterns = link_patterns
self.use_file_vars = use_file_vars
self._outdent_re = re.compile(r'^(\t|[ ]{1,%d})' % tab_width, re.M)
def reset(self):
self.urls = {}
self.titles = {}
self.html_blocks = {}
self.html_spans = {}
self.list_level = 0
self.extras = self._instance_extras.copy()
if "footnotes" in self.extras:
self.footnotes = {}
self.footnote_ids = []
def convert(self, text):
"""Convert the given text."""
# Main function. The order in which other subs are called here is
# essential. Link and image substitutions need to happen before
# _EscapeSpecialChars(), so that any *'s or _'s in the <a>
# and <img> tags get encoded.
# Clear the global hashes. If we don't clear these, you get conflicts
# from other articles when generating a page which contains more than
# one article (e.g. an index page that shows the N most recent
# articles):
self.reset()
if not isinstance(text, unicode):
#TODO: perhaps shouldn't presume UTF-8 for string input?
text = unicode(text, 'utf-8')
if self.use_file_vars:
# Look for emacs-style file variable hints.
emacs_vars = self._get_emacs_vars(text)
if "markdown-extras" in emacs_vars:
splitter = re.compile("[ ,]+")
for e in splitter.split(emacs_vars["markdown-extras"]):
if '=' in e:
ename, earg = e.split('=', 1)
try:
earg = int(earg)
except ValueError:
pass
else:
ename, earg = e, None
self.extras[ename] = earg
# Standardize line endings:
text = re.sub("\r\n|\r", "\n", text)
# Make sure $text ends with a couple of newlines:
text += "\n\n"
# Convert all tabs to spaces.
text = self._detab(text)
# Strip any lines consisting only of spaces and tabs.
# This makes subsequent regexen easier to write, because we can
# match consecutive blank lines with /\n+/ instead of something
# contorted like /[ \t]*\n+/ .
text = self._ws_only_line_re.sub("", text)
if self.safe_mode:
text = self._hash_html_spans(text)
# Turn block-level HTML blocks into hash entries
text = self._hash_html_blocks(text, raw=True)
# Strip link definitions, store in hashes.
if "footnotes" in self.extras:
# Must do footnotes first because an unlucky footnote defn
# looks like a link defn:
# [^4]: this "looks like a link defn"
text = self._strip_footnote_definitions(text)
text = self._strip_link_definitions(text)
text = self._run_block_gamut(text)
if "footnotes" in self.extras:
text = self._add_footnotes(text)
text = self._unescape_special_chars(text)
if self.safe_mode:
text = self._unhash_html_spans(text)
text += "\n"
return text
_emacs_oneliner_vars_pat = re.compile(r"-\*-\s*([^\r\n]*?)\s*-\*-", re.UNICODE)
# This regular expression is intended to match blocks like this:
# PREFIX Local Variables: SUFFIX
# PREFIX mode: Tcl SUFFIX
# PREFIX End: SUFFIX
# Some notes:
# - "[ \t]" is used instead of "\s" to specifically exclude newlines
# - "(\r\n|\n|\r)" is used instead of "$" because the sre engine does
# not like anything other than Unix-style line terminators.
_emacs_local_vars_pat = re.compile(r"""^
(?P<prefix>(?:[^\r\n|\n|\r])*?)
[\ \t]*Local\ Variables:[\ \t]*
(?P<suffix>.*?)(?:\r\n|\n|\r)
(?P<content>.*?\1End:)
""", re.IGNORECASE | re.MULTILINE | re.DOTALL | re.VERBOSE)
def _get_emacs_vars(self, text):
"""Return a dictionary of emacs-style local variables.
Parsing is done loosely according to this spec (and according to
some in-practice deviations from this):
http://www.gnu.org/software/emacs/manual/html_node/emacs/Specifying-File-Variables.html#Specifying-File-Variables
"""
emacs_vars = {}
SIZE = pow(2, 13) # 8kB
# Search near the start for a '-*-'-style one-liner of variables.
head = text[:SIZE]
if "-*-" in head:
match = self._emacs_oneliner_vars_pat.search(head)
if match:
emacs_vars_str = match.group(1)
assert '\n' not in emacs_vars_str
emacs_var_strs = [s.strip() for s in emacs_vars_str.split(';')
if s.strip()]
if len(emacs_var_strs) == 1 and ':' not in emacs_var_strs[0]:
# While not in the spec, this form is allowed by emacs:
# -*- Tcl -*-
# where the implied "variable" is "mode". This form
# is only allowed if there are no other variables.
emacs_vars["mode"] = emacs_var_strs[0].strip()
else:
for emacs_var_str in emacs_var_strs:
try:
variable, value = emacs_var_str.strip().split(':', 1)
except ValueError:
log.debug("emacs variables error: malformed -*- "
"line: %r", emacs_var_str)
continue
# Lowercase the variable name because Emacs allows "Mode"
# or "mode" or "MoDe", etc.
emacs_vars[variable.lower()] = value.strip()
tail = text[-SIZE:]
if "Local Variables" in tail:
match = self._emacs_local_vars_pat.search(tail)
if match:
prefix = match.group("prefix")
suffix = match.group("suffix")
lines = match.group("content").splitlines(0)
#print "prefix=%r, suffix=%r, content=%r, lines: %s"\
# % (prefix, suffix, match.group("content"), lines)
# Validate the Local Variables block: proper prefix and suffix
# usage.
for i, line in enumerate(lines):
if not line.startswith(prefix):
log.debug("emacs variables error: line '%s' "
"does not use proper prefix '%s'"
% (line, prefix))
return {}
# Don't validate suffix on last line. Emacs doesn't care,
# neither should we.
if i != len(lines)-1 and not line.endswith(suffix):
log.debug("emacs variables error: line '%s' "
"does not use proper suffix '%s'"
% (line, suffix))
return {}
# Parse out one emacs var per line.
continued_for = None
for line in lines[:-1]: # no var on the last line ("PREFIX End:")
if prefix: line = line[len(prefix):] # strip prefix
if suffix: line = line[:-len(suffix)] # strip suffix
line = line.strip()
if continued_for:
variable = continued_for
if line.endswith('\\'):
line = line[:-1].rstrip()
else:
continued_for = None
emacs_vars[variable] += ' ' + line
else:
try:
variable, value = line.split(':', 1)
except ValueError:
log.debug("local variables error: missing colon "
"in local variables entry: '%s'" % line)
continue
# Do NOT lowercase the variable name, because Emacs only
# allows "mode" (and not "Mode", "MoDe", etc.) in this block.
value = value.strip()
if value.endswith('\\'):
value = value[:-1].rstrip()
continued_for = variable
else:
continued_for = None
emacs_vars[variable] = value
# Unquote values.
for var, val in emacs_vars.items():
if len(val) > 1 and (val.startswith('"') and val.endswith('"')
or val.startswith("'") and val.endswith("'")):
emacs_vars[var] = val[1:-1]
return emacs_vars
# Cribbed from a post by Bart Lateur:
# <http://www.nntp.perl.org/group/perl.macperl.anyperl/154>
_detab_re = re.compile(r'(.*?)\t', re.M)
def _detab_sub(self, match):
g1 = match.group(1)
return g1 + (' ' * (self.tab_width - len(g1) % self.tab_width))
def _detab(self, text):
r"""Remove (leading?) tabs from a file.
>>> m = Markdown()
>>> m._detab("\tfoo")
' foo'
>>> m._detab(" \tfoo")
' foo'
>>> m._detab("\t foo")
' foo'
>>> m._detab(" foo")
' foo'
>>> m._detab(" foo\n\tbar\tblam")
' foo\n bar blam'
"""
if '\t' not in text:
return text
return self._detab_re.subn(self._detab_sub, text)[0]
_block_tags_a = 'p|div|h[1-6]|blockquote|pre|table|dl|ol|ul|script|noscript|form|fieldset|iframe|math|ins|del'
_strict_tag_block_re = re.compile(r"""
( # save in \1
^ # start of line (with re.M)
<(%s) # start tag = \2
\b # word break
(.*\n)*? # any number of lines, minimally matching
</\2> # the matching end tag
[ \t]* # trailing spaces/tabs
(?=\n+|\Z) # followed by a newline or end of document
)
""" % _block_tags_a,
re.X | re.M)
_block_tags_b = 'p|div|h[1-6]|blockquote|pre|table|dl|ol|ul|script|noscript|form|fieldset|iframe|math'
_liberal_tag_block_re = re.compile(r"""
( # save in \1
^ # start of line (with re.M)
<(%s) # start tag = \2
\b # word break
(.*\n)*? # any number of lines, minimally matching
.*</\2> # the matching end tag
[ \t]* # trailing spaces/tabs
(?=\n+|\Z) # followed by a newline or end of document
)
""" % _block_tags_b,
re.X | re.M)
def _hash_html_block_sub(self, match, raw=False):
html = match.group(1)
if raw and self.safe_mode:
html = self._sanitize_html(html)
key = _hash_text(html)
self.html_blocks[key] = html
return "\n\n" + key + "\n\n"
def _hash_html_blocks(self, text, raw=False):
"""Hashify HTML blocks
We only want to do this for block-level HTML tags, such as headers,
lists, and tables. That's because we still want to wrap <p>s around
"paragraphs" that are wrapped in non-block-level tags, such as anchors,
phrase emphasis, and spans. The list of tags we're looking for is
hard-coded.
@param raw {boolean} indicates if these are raw HTML blocks in
the original source. It makes a difference in "safe" mode.
"""
if '<' not in text:
return text
# Pass `raw` value into our calls to self._hash_html_block_sub.
hash_html_block_sub = _curry(self._hash_html_block_sub, raw=raw)
# First, look for nested blocks, e.g.:
# <div>
# <div>
# tags for inner block must be indented.
# </div>
# </div>
#
# The outermost tags must start at the left margin for this to match, and
# the inner nested divs must be indented.
# We need to do this before the next, more liberal match, because the next
# match will start at the first `<div>` and stop at the first `</div>`.
text = self._strict_tag_block_re.sub(hash_html_block_sub, text)
# Now match more liberally, simply from `\n<tag>` to `</tag>\n`
text = self._liberal_tag_block_re.sub(hash_html_block_sub, text)
# Special case just for <hr />. It was easier to make a special
# case than to make the other regex more complicated.
if "<hr" in text:
_hr_tag_re = _hr_tag_re_from_tab_width(self.tab_width)
text = _hr_tag_re.sub(hash_html_block_sub, text)
# Special case for standalone HTML comments:
if "<!--" in text:
start = 0
while True:
# Delimiters for next comment block.
try:
start_idx = text.index("<!--", start)
except ValueError, ex:
break
try:
end_idx = text.index("-->", start_idx) + 3
except ValueError, ex:
break
# Start position for next comment block search.
start = end_idx
# Validate whitespace before comment.
if start_idx:
# - Up to `tab_width - 1` spaces before start_idx.
for i in range(self.tab_width - 1):
if text[start_idx - 1] != ' ':
break
start_idx -= 1
if start_idx == 0:
break
# - Must be preceded by 2 newlines or hit the start of
# the document.
if start_idx == 0:
pass
elif start_idx == 1 and text[0] == '\n':
start_idx = 0 # to match minute detail of Markdown.pl regex
elif text[start_idx-2:start_idx] == '\n\n':
pass
else:
break
# Validate whitespace after comment.
# - Any number of spaces and tabs.
while end_idx < len(text):
if text[end_idx] not in ' \t':
break
end_idx += 1
# - Must be followed by 2 newlines or hit end of text.
if text[end_idx:end_idx+2] not in ('', '\n', '\n\n'):
continue
# Escape and hash (must match `_hash_html_block_sub`).
html = text[start_idx:end_idx]
if raw and self.safe_mode:
html = self._sanitize_html(html)
key = _hash_text(html)
self.html_blocks[key] = html
text = text[:start_idx] + "\n\n" + key + "\n\n" + text[end_idx:]
if "xml" in self.extras:
# Treat XML processing instructions and namespaced one-liner
# tags as if they were block HTML tags. E.g., if standalone
# (i.e. are their own paragraph), the following do not get
# wrapped in a <p> tag:
# <?foo bar?>
#
# <xi:include xmlns:xi="http://www.w3.org/2001/XInclude" href="chapter_1.md"/>
_xml_oneliner_re = _xml_oneliner_re_from_tab_width(self.tab_width)
text = _xml_oneliner_re.sub(hash_html_block_sub, text)
return text
def _strip_link_definitions(self, text):
# Strips link definitions from text, stores the URLs and titles in
# hash references.
less_than_tab = self.tab_width - 1
# Link defs are in the form:
# [id]: url "optional title"
_link_def_re = re.compile(r"""
^[ ]{0,%d}\[(.+)\]: # id = \1
[ \t]*
\n? # maybe *one* newline
[ \t]*
<?(.+?)>? # url = \2
[ \t]*
(?:
\n? # maybe one newline
[ \t]*
(?<=\s) # lookbehind for whitespace
['"(]
([^\n]*) # title = \3
['")]
[ \t]*
)? # title is optional
(?:\n+|\Z)
""" % less_than_tab, re.X | re.M | re.U)
return _link_def_re.sub(self._extract_link_def_sub, text)
def _extract_link_def_sub(self, match):
id, url, title = match.groups()
key = id.lower() # Link IDs are case-insensitive
self.urls[key] = self._encode_amps_and_angles(url)
if title:
self.titles[key] = title.replace('"', '&quot;')
return ""
def _extract_footnote_def_sub(self, match):
id, text = match.groups()
text = _dedent(text, skip_first_line=not text.startswith('\n')).strip()
normed_id = re.sub(r'\W', '-', id)
# Ensure footnote text ends with a couple newlines (for some
# block gamut matches).
self.footnotes[normed_id] = text + "\n\n"
return ""
def _strip_footnote_definitions(self, text):
"""A footnote definition looks like this:
[^note-id]: Text of the note.
May include one or more indented paragraphs.
Where,
- The 'note-id' can be pretty much anything, though typically it
is the number of the footnote.
- The first paragraph may start on the next line, like so:
[^note-id]:
Text of the note.
"""
less_than_tab = self.tab_width - 1
footnote_def_re = re.compile(r'''
^[ ]{0,%d}\[\^(.+)\]: # id = \1
[ \t]*
( # footnote text = \2
# First line need not start with the spaces.
(?:\s*.*\n+)
(?:
(?:[ ]{%d} | \t) # Subsequent lines must be indented.
.*\n+
)*
)
# Lookahead for non-space at line-start, or end of doc.
(?:(?=^[ ]{0,%d}\S)|\Z)
''' % (less_than_tab, self.tab_width, self.tab_width),
re.X | re.M)
return footnote_def_re.sub(self._extract_footnote_def_sub, text)
_hr_res = [
re.compile(r"^[ ]{0,2}([ ]?\*[ ]?){3,}[ \t]*$", re.M),
re.compile(r"^[ ]{0,2}([ ]?\-[ ]?){3,}[ \t]*$", re.M),
re.compile(r"^[ ]{0,2}([ ]?\_[ ]?){3,}[ \t]*$", re.M),
]
def _run_block_gamut(self, text):
# These are all the transformations that form block-level
# tags like paragraphs, headers, and list items.
text = self._do_headers(text)
# Do Horizontal Rules:
hr = "\n<hr"+self.empty_element_suffix+"\n"
for hr_re in self._hr_res:
text = hr_re.sub(hr, text)
text = self._do_lists(text)
if "pyshell" in self.extras:
text = self._prepare_pyshell_blocks(text)
text = self._do_code_blocks(text)
text = self._do_block_quotes(text)
# We already ran _HashHTMLBlocks() before, in Markdown(), but that
# was to escape raw HTML in the original Markdown source. This time,
# we're escaping the markup we've just created, so that we don't wrap
# <p> tags around block-level tags.
text = self._hash_html_blocks(text)
text = self._form_paragraphs(text)
return text
def _pyshell_block_sub(self, match):
lines = match.group(0).splitlines(0)
_dedentlines(lines)
indent = ' ' * self.tab_width
s = ('\n' # separate from possible cuddled paragraph
+ indent + ('\n'+indent).join(lines)
+ '\n\n')
return s
def _prepare_pyshell_blocks(self, text):
"""Ensure that Python interactive shell sessions are put in
code blocks -- even if not properly indented.
"""
if ">>>" not in text:
return text
less_than_tab = self.tab_width - 1
_pyshell_block_re = re.compile(r"""
^([ ]{0,%d})>>>[ ].*\n # first line
^(\1.*\S+.*\n)* # any number of subsequent lines
^\n # ends with a blank line
""" % less_than_tab, re.M | re.X)
return _pyshell_block_re.sub(self._pyshell_block_sub, text)
def _run_span_gamut(self, text):
# These are all the transformations that occur *within* block-level
# tags like paragraphs, headers, and list items.
text = self._do_code_spans(text)
text = self._escape_special_chars(text)
# Process anchor and image tags.
text = self._do_links(text)
# Make links out of things like `<http://example.com/>`
# Must come after _do_links(), because you can use < and >
# delimiters in inline links like [this](<url>).
text = self._do_auto_links(text)
if "link-patterns" in self.extras:
text = self._do_link_patterns(text)
text = self._encode_amps_and_angles(text)
text = self._do_italics_and_bold(text)
# Do hard breaks:
text = re.sub(r" {2,}\n", " <br%s\n" % self.empty_element_suffix, text)
return text
# "Sorta" because auto-links are identified as "tag" tokens.
_sorta_html_tokenize_re = re.compile(r"""
(
# tag
</?
(?:\w+) # tag name
(?:\s+(?:[\w-]+:)?[\w-]+=(?:".*?"|'.*?'))* # attributes
\s*/?>
|
# auto-link (e.g., <http://www.activestate.com/>)
<\w+[^>]*>
|
<!--.*?--> # comment
|
<\?.*?\?> # processing instruction
)
""", re.X)
def _escape_special_chars(self, text):
# Python markdown note: the HTML tokenization here differs from
# that in Markdown.pl, hence the behaviour for subtle cases can
# differ (I believe the tokenizer here does a better job because
# it isn't susceptible to unmatched '<' and '>' in HTML tags).
# Note, however, that '>' is not allowed in an auto-link URL
# here.
escaped = []
is_html_markup = False
for token in self._sorta_html_tokenize_re.split(text):
if is_html_markup:
# Within tags/HTML-comments/auto-links, encode * and _
# so they don't conflict with their use in Markdown for
# italics and strong. We're replacing each such
# character with its corresponding MD5 checksum value;
# this is likely overkill, but it should prevent us from
# colliding with the escape values by accident.
escaped.append(token.replace('*', g_escape_table['*'])
.replace('_', g_escape_table['_']))
else:
escaped.append(self._encode_backslash_escapes(token))
is_html_markup = not is_html_markup
return ''.join(escaped)
def _hash_html_spans(self, text):
# Used for safe_mode.
def _is_auto_link(s):
if ':' in s and self._auto_link_re.match(s):
return True
elif '@' in s and self._auto_email_link_re.match(s):
return True
return False
tokens = []
is_html_markup = False
for token in self._sorta_html_tokenize_re.split(text):
if is_html_markup and not _is_auto_link(token):
sanitized = self._sanitize_html(token)
key = _hash_text(sanitized)
self.html_spans[key] = sanitized
tokens.append(key)
else:
tokens.append(token)
is_html_markup = not is_html_markup
return ''.join(tokens)
def _unhash_html_spans(self, text):
for key, sanitized in self.html_spans.items():
text = text.replace(key, sanitized)
return text
def _sanitize_html(self, s):
if self.safe_mode == "replace":
return self.html_removed_text
elif self.safe_mode == "escape":
replacements = [
('&', '&amp;'),
('<', '&lt;'),
('>', '&gt;'),
]
for before, after in replacements:
s = s.replace(before, after)
return s
else:
raise MarkdownError("invalid value for 'safe_mode': %r (must be "
"'escape' or 'replace')" % self.safe_mode)
_tail_of_inline_link_re = re.compile(r'''
# Match tail of: [text](/url/) or [text](/url/ "title")
\( # literal paren
[ \t]*
(?P<url> # \1
<.*?>
|
.*?
)
[ \t]*
( # \2
(['"]) # quote char = \3
(?P<title>.*?)
\3 # matching quote
)? # title is optional
\)
''', re.X | re.S)
_tail_of_reference_link_re = re.compile(r'''
# Match tail of: [text][id]
[ ]? # one optional space
(?:\n[ ]*)? # one optional newline followed by spaces
\[
(?P<id>.*?)
\]
''', re.X | re.S)
def _do_links(self, text):
"""Turn Markdown link shortcuts into XHTML <a> and <img> tags.
This is a combination of Markdown.pl's _DoAnchors() and
_DoImages(). They are done together because that simplified the
approach. It was necessary to use a different approach than
Markdown.pl because of the lack of atomic matching support in
Python's regex engine used in $g_nested_brackets.
"""
MAX_LINK_TEXT_SENTINEL = 3000 # markdown2 issue 24
# `anchor_allowed_pos` is used to support img links inside
# anchors, but not anchors inside anchors. An anchor's start
# pos must be `>= anchor_allowed_pos`.
anchor_allowed_pos = 0
curr_pos = 0
while True: # Handle the next link.
# The next '[' is the start of:
# - an inline anchor: [text](url "title")
# - a reference anchor: [text][id]
# - an inline img: 
# - a reference img: ![text][id]
# - a footnote ref: [^id]
# (Only if 'footnotes' extra enabled)
# - a footnote defn: [^id]: ...
# (Only if 'footnotes' extra enabled) These have already
# been stripped in _strip_footnote_definitions() so no
# need to watch for them.
# - a link definition: [id]: url "title"
# These have already been stripped in
# _strip_link_definitions() so no need to watch for them.
# - not markup: [...anything else...
try:
start_idx = text.index('[', curr_pos)
except ValueError:
break
text_length = len(text)
# Find the matching closing ']'.
# Markdown.pl allows *matching* brackets in link text so we
# will here too. Markdown.pl *doesn't* currently allow
# matching brackets in img alt text -- we'll differ in that
# regard.
bracket_depth = 0
for p in range(start_idx+1, min(start_idx+MAX_LINK_TEXT_SENTINEL,
text_length)):
ch = text[p]
if ch == ']':
bracket_depth -= 1
if bracket_depth < 0:
break
elif ch == '[':
bracket_depth += 1
else:
# Closing bracket not found within sentinel length.
# This isn't markup.
curr_pos = start_idx + 1
continue
link_text = text[start_idx+1:p]
# Possibly a footnote ref?
if "footnotes" in self.extras and link_text.startswith("^"):
normed_id = re.sub(r'\W', '-', link_text[1:])
if normed_id in self.footnotes:
self.footnote_ids.append(normed_id)
result = '<sup class="footnote-ref" id="fnref-%s">' \
'<a href="#fn-%s">%s</a></sup>' \
% (normed_id, normed_id, len(self.footnote_ids))
text = text[:start_idx] + result + text[p+1:]
else:
# This id isn't defined, leave the markup alone.
curr_pos = p+1
continue
# Now determine what this is by the remainder.
p += 1
if p == text_length:
return text
# Inline anchor or img?
if text[p] == '(': # attempt at perf improvement
match = self._tail_of_inline_link_re.match(text, p)
if match:
# Handle an inline anchor or img.
is_img = start_idx > 0 and text[start_idx-1] == "!"
if is_img:
start_idx -= 1
url, title = match.group("url"), match.group("title")
if url and url[0] == '<':
url = url[1:-1] # '<url>' -> 'url'
# We've got to encode these to avoid conflicting
# with italics/bold.
url = url.replace('*', g_escape_table['*']) \
.replace('_', g_escape_table['_'])
if title:
title_str = ' title="%s"' \
% title.replace('*', g_escape_table['*']) \
.replace('_', g_escape_table['_']) \
.replace('"', '"')
else:
title_str = ''
if is_img:
result = '<img src="%s" alt="%s"%s%s' \
% (url.replace('"', '&quot;'),
link_text.replace('"', '&quot;'),
title_str, self.empty_element_suffix)
curr_pos = start_idx + len(result)
text = text[:start_idx] + result + text[match.end():]
elif start_idx >= anchor_allowed_pos:
result_head = '<a href="%s"%s>' % (url, title_str)
result = '%s%s</a>' % (result_head, link_text)
# <img> allowed from curr_pos on, <a> from
# anchor_allowed_pos on.
curr_pos = start_idx + len(result_head)
anchor_allowed_pos = start_idx + len(result)
text = text[:start_idx] + result + text[match.end():]
else:
# Anchor not allowed here.
curr_pos = start_idx + 1
continue
# Reference anchor or img?
else:
match = self._tail_of_reference_link_re.match(text, p)
if match:
# Handle a reference-style anchor or img.
is_img = start_idx > 0 and text[start_idx-1] == "!"
if is_img:
start_idx -= 1
link_id = match.group("id").lower()
if not link_id:
link_id = link_text.lower() # for links like [this][]
if link_id in self.urls:
url = self.urls[link_id]
# We've got to encode these to avoid conflicting
# with italics/bold.
url = url.replace('*', g_escape_table['*']) \
.replace('_', g_escape_table['_'])
title = self.titles.get(link_id)
if title:
title = title.replace('*', g_escape_table['*']) \
.replace('_', g_escape_table['_'])
title_str = ' title="%s"' % title
else:
title_str = ''
if is_img:
result = '<img src="%s" alt="%s"%s%s' \
% (url.replace('"', '&quot;'),
link_text.replace('"', '&quot;'),
title_str, self.empty_element_suffix)
curr_pos = start_idx + len(result)
text = text[:start_idx] + result + text[match.end():]
elif start_idx >= anchor_allowed_pos:
result_head = '<a href="%s"%s>' % (url, title_str)
result = '%s%s</a>' % (result_head, link_text)
# <img> allowed from curr_pos on, <a> from
# anchor_allowed_pos on.
curr_pos = start_idx + len(result_head)
anchor_allowed_pos = start_idx + len(result)
text = text[:start_idx] + result + text[match.end():]
else:
# Anchor not allowed here.
curr_pos = start_idx + 1
else:
# This id isn't defined, leave the markup alone.
curr_pos = match.end()
continue
# Otherwise, it isn't markup.
curr_pos = start_idx + 1
return text
_setext_h_re = re.compile(r'^(.+)[ \t]*\n(=+|-+)[ \t]*\n+', re.M)
def _setext_h_sub(self, match):
n = {"=": 1, "-": 2}[match.group(2)[0]]
demote_headers = self.extras.get("demote-headers")
if demote_headers:
n = min(n + demote_headers, 6)
return "<h%d>%s</h%d>\n\n" \
% (n, self._run_span_gamut(match.group(1)), n)
_atx_h_re = re.compile(r'''
^(\#{1,6}) # \1 = string of #'s
[ \t]*
(.+?) # \2 = Header text
[ \t]*
(?<!\\) # ensure not an escaped trailing '#'
\#* # optional closing #'s (not counted)
\n+
''', re.X | re.M)
def _atx_h_sub(self, match):
n = len(match.group(1))
demote_headers = self.extras.get("demote-headers")
if demote_headers:
n = min(n + demote_headers, 6)
return "<h%d>%s</h%d>\n\n" \
% (n, self._run_span_gamut(match.group(2)), n)
def _do_headers(self, text):
# Setext-style headers:
# Header 1
# ========
#
# Header 2
# --------
text = self._setext_h_re.sub(self._setext_h_sub, text)
# atx-style headers:
# # Header 1
# ## Header 2
# ## Header 2 with closing hashes ##
# ...
# ###### Header 6
text = self._atx_h_re.sub(self._atx_h_sub, text)
return text
_marker_ul_chars = '*+-'
_marker_any = r'(?:[%s]|\d+\.)' % _marker_ul_chars
_marker_ul = '(?:[%s])' % _marker_ul_chars
_marker_ol = r'(?:\d+\.)'
def _list_sub(self, match):
lst = match.group(1)
lst_type = match.group(3) in self._marker_ul_chars and "ul" or "ol"
result = self._process_list_items(lst)
if self.list_level:
return "<%s>\n%s</%s>\n" % (lst_type, result, lst_type)
else:
return "<%s>\n%s</%s>\n\n" % (lst_type, result, lst_type)
def _do_lists(self, text):
# Form HTML ordered (numbered) and unordered (bulleted) lists.
for marker_pat in (self._marker_ul, self._marker_ol):
# Re-usable pattern to match any entire ul or ol list:
less_than_tab = self.tab_width - 1
whole_list = r'''
( # \1 = whole list
( # \2
[ ]{0,%d}
(%s) # \3 = first list item marker
[ \t]+
)
(?:.+?)
( # \4
\Z
|
\n{2,}
(?=\S)
(?! # Negative lookahead for another list item marker
[ \t]*
%s[ \t]+
)
)
)
''' % (less_than_tab, marker_pat, marker_pat)
# We use a different prefix before nested lists than top-level lists.
# See extended comment in _process_list_items().
#
# Note: There's a bit of duplication here. My original implementation
# created a scalar regex pattern as the conditional result of the test on
# $g_list_level, and then only ran the $text =~ s{...}{...}egmx
# substitution once, using the scalar as the pattern. This worked,
# everywhere except when running under MT on my hosting account at Pair
# Networks. There, this caused all rebuilds to be killed by the reaper (or
# perhaps they crashed, but that seems incredibly unlikely given that the
# same script on the same server ran fine *except* under MT). I've spent
# more time trying to figure out why this is happening than I'd like to
# admit. My only guess, backed up by the fact that this workaround works,
# is that Perl optimizes the substitution when it can figure out that the
# pattern will never change, and when this optimization isn't on, we run
# afoul of the reaper. Thus, the slightly redundant code that uses two
# static s/// patterns rather than one conditional pattern.
if self.list_level:
sub_list_re = re.compile("^"+whole_list, re.X | re.M | re.S)
text = sub_list_re.sub(self._list_sub, text)
else:
list_re = re.compile(r"(?:(?<=\n\n)|\A\n?)"+whole_list,
re.X | re.M | re.S)
text = list_re.sub(self._list_sub, text)
return text
_list_item_re = re.compile(r'''
(\n)? # leading line = \1
(^[ \t]*) # leading whitespace = \2
(%s) [ \t]+ # list marker = \3
((?:.+?) # list item text = \4
(\n{1,2})) # eols = \5
(?= \n* (\Z | \2 (%s) [ \t]+))
''' % (_marker_any, _marker_any),
re.M | re.X | re.S)
_last_li_endswith_two_eols = False
def _list_item_sub(self, match):
item = match.group(4)
leading_line = match.group(1)
leading_space = match.group(2)
if leading_line or "\n\n" in item or self._last_li_endswith_two_eols:
item = self._run_block_gamut(self._outdent(item))
else:
# Recursion for sub-lists:
item = self._do_lists(self._outdent(item))
if item.endswith('\n'):
item = item[:-1]
item = self._run_span_gamut(item)
self._last_li_endswith_two_eols = (len(match.group(5)) == 2)
return "<li>%s</li>\n" % item
def _process_list_items(self, list_str):
# Process the contents of a single ordered or unordered list,
# splitting it into individual list items.
# The $g_list_level global keeps track of when we're inside a list.
# Each time we enter a list, we increment it; when we leave a list,
# we decrement. If it's zero, we're not in a list anymore.
#
# We do this because when we're not inside a list, we want to treat
# something like this:
#
# I recommend upgrading to version
# 8. Oops, now this line is treated
# as a sub-list.
#
# As a single paragraph, despite the fact that the second line starts
# with a digit-period-space sequence.
#
# Whereas when we're inside a list (or sub-list), that line will be
# treated as the start of a sub-list. What a kludge, huh? This is
# an aspect of Markdown's syntax that's hard to parse perfectly
# without resorting to mind-reading. Perhaps the solution is to
# change the syntax rules such that sub-lists must start with a
# starting cardinal number; e.g. "1." or "a.".
self.list_level += 1
self._last_li_endswith_two_eols = False
list_str = list_str.rstrip('\n') + '\n'
list_str = self._list_item_re.sub(self._list_item_sub, list_str)
self.list_level -= 1
return list_str
def _get_pygments_lexer(self, lexer_name):
try:
from pygments import lexers, util
except ImportError:
return None
try:
return lexers.get_lexer_by_name(lexer_name)
except util.ClassNotFound:
return None
def _color_with_pygments(self, codeblock, lexer, **formatter_opts):
import pygments
import pygments.formatters
class HtmlCodeFormatter(pygments.formatters.HtmlFormatter):
def _wrap_code(self, inner):
"""A function for use in a Pygments Formatter which
wraps in <code> tags.
"""
yield 0, "<code>"
for tup in inner:
yield tup
yield 0, "</code>"
def wrap(self, source, outfile):
"""Return the source with a code, pre, and div."""
return self._wrap_div(self._wrap_pre(self._wrap_code(source)))
formatter = HtmlCodeFormatter(cssclass="codehilite", **formatter_opts)
return pygments.highlight(codeblock, lexer, formatter)
def _code_block_sub(self, match):
codeblock = match.group(1)
codeblock = self._outdent(codeblock)
codeblock = self._detab(codeblock)
codeblock = codeblock.lstrip('\n') # trim leading newlines
codeblock = codeblock.rstrip() # trim trailing whitespace
if "code-color" in self.extras and codeblock.startswith(":::"):
lexer_name, rest = codeblock.split('\n', 1)
lexer_name = lexer_name[3:].strip()
lexer = self._get_pygments_lexer(lexer_name)
codeblock = rest.lstrip("\n") # Remove lexer declaration line.
if lexer:
formatter_opts = self.extras['code-color'] or {}
colored = self._color_with_pygments(codeblock, lexer,
**formatter_opts)
return "\n\n%s\n\n" % colored
codeblock = self._encode_code(codeblock)
return "\n\n<pre><code>%s\n</code></pre>\n\n" % codeblock
def _do_code_blocks(self, text):
"""Process Markdown `<pre><code>` blocks."""
code_block_re = re.compile(r'''
(?:\n\n|\A)
( # $1 = the code block -- one or more lines, starting with a space/tab
(?:
(?:[ ]{%d} | \t) # Lines must start with a tab or a tab-width of spaces
.*\n+
)+
)
((?=^[ ]{0,%d}\S)|\Z) # Lookahead for non-space at line-start, or end of doc
''' % (self.tab_width, self.tab_width),
re.M | re.X)
return code_block_re.sub(self._code_block_sub, text)
# Rules for a code span:
# - backslash escapes are not interpreted in a code span
# - to include one backtick or a run of backticks, the delimiters must
# be a longer run of backticks
# - cannot start or end a code span with a backtick; pad with a
# space and that space will be removed in the emitted HTML
# See `test/tm-cases/escapes.text` for a number of edge-case
# examples.
_code_span_re = re.compile(r'''
(?<!\\)
(`+) # \1 = Opening run of `
(?!`) # See Note A test/tm-cases/escapes.text
(.+?) # \2 = The code block
(?<!`)
\1 # Matching closer
(?!`)
''', re.X | re.S)
def _code_span_sub(self, match):
c = match.group(2).strip(" \t")
c = self._encode_code(c)
return "<code>%s</code>" % c
def _do_code_spans(self, text):
# * Backtick quotes are used for <code></code> spans.
#
# * You can use multiple backticks as the delimiters if you want to
# include literal backticks in the code span. So, this input:
#
# Just type ``foo `bar` baz`` at the prompt.
#
# Will translate to:
#
# <p>Just type <code>foo `bar` baz</code> at the prompt.</p>
#
# There's no arbitrary limit to the number of backticks you
# can use as delimiters. If you need three consecutive backticks
# in your code, use four for delimiters, etc.
#
# * You can use spaces to get literal backticks at the edges:
#
# ... type `` `bar` `` ...
#
# Turns to:
#
# ... type <code>`bar`</code> ...
return self._code_span_re.sub(self._code_span_sub, text)
def _encode_code(self, text):
"""Encode/escape certain characters inside Markdown code runs.
The point is that in code, these characters are literals,
and lose their special Markdown meanings.
"""
replacements = [
# Encode all ampersands; HTML entities are not
# entities within a Markdown code span.
('&', '&amp;'),
# Do the angle bracket song and dance:
('<', '&lt;'),
('>', '&gt;'),
# Now, escape characters that are magic in Markdown:
('*', g_escape_table['*']),
('_', g_escape_table['_']),
('{', g_escape_table['{']),
('}', g_escape_table['}']),
('[', g_escape_table['[']),
(']', g_escape_table[']']),
('\\', g_escape_table['\\']),
]
for before, after in replacements:
text = text.replace(before, after)
return text
_strong_re = re.compile(r"(?<!\w)(\*\*|__)(?=\S)(.+?[*_]*)(?<=\S)\1(?!\w)", re.S)
_em_re = re.compile(r"(?<!\w)(\*|_)(?=\S)(.+?)(?<=\S)\1(?!\w)", re.S)
_code_friendly_strong_re = re.compile(r"\*\*(?=\S)(.+?[*_]*)(?<=\S)\*\*", re.S)
_code_friendly_em_re = re.compile(r"\*(?=\S)(.+?)(?<=\S)\*", re.S)
def _do_italics_and_bold(self, text):
# <strong> must go first:
if "code-friendly" in self.extras:
text = self._code_friendly_strong_re.sub(r"<strong>\1</strong>", text)
text = self._code_friendly_em_re.sub(r"<em>\1</em>", text)
else:
text = self._strong_re.sub(r"<strong>\2</strong>", text)
text = self._em_re.sub(r"<em>\2</em>", text)
return text
_block_quote_re = re.compile(r'''
( # Wrap whole match in \1
(
^[ \t]*>[ \t]? # '>' at the start of a line
.+\n # rest of the first line
(.+\n)* # subsequent consecutive lines
\n* # blanks
)+
)
''', re.M | re.X)
_bq_one_level_re = re.compile('^[ \t]*>[ \t]?', re.M)
_html_pre_block_re = re.compile(r'(\s*<pre>.+?</pre>)', re.S)
def _dedent_two_spaces_sub(self, match):
return re.sub(r'(?m)^ ', '', match.group(1))
def _block_quote_sub(self, match):
bq = match.group(1)
bq = self._bq_one_level_re.sub('', bq) # trim one level of quoting
bq = self._ws_only_line_re.sub('', bq) # trim whitespace-only lines
bq = self._run_block_gamut(bq) # recurse
bq = re.sub('(?m)^', ' ', bq)
# These leading spaces screw with <pre> content, so we need to fix that:
bq = self._html_pre_block_re.sub(self._dedent_two_spaces_sub, bq)
return "<blockquote>\n%s\n</blockquote>\n\n" % bq
def _do_block_quotes(self, text):
if '>' not in text:
return text
return self._block_quote_re.sub(self._block_quote_sub, text)
def _form_paragraphs(self, text):
# Strip leading and trailing lines:
text = text.strip('\n')
# Wrap <p> tags.
grafs = re.split(r"\n{2,}", text)
for i, graf in enumerate(grafs):
if graf in self.html_blocks:
# Unhashify HTML blocks
grafs[i] = self.html_blocks[graf]
else:
# Wrap <p> tags.
graf = self._run_span_gamut(graf)
grafs[i] = "<p>" + graf.lstrip(" \t") + "</p>"
return "\n\n".join(grafs)
def _add_footnotes(self, text):
if self.footnotes:
footer = [
'<div class="footnotes">',
'<hr' + self.empty_element_suffix,
'<ol>',
]
for i, id in enumerate(self.footnote_ids):
if i != 0:
footer.append('')
footer.append('<li id="fn-%s">' % id)
footer.append(self._run_block_gamut(self.footnotes[id]))
backlink = ('<a href="#fnref-%s" '
'class="footnoteBackLink" '
'title="Jump back to footnote %d in the text.">'
'&#8617;</a>' % (id, i+1))
if footer[-1].endswith("</p>"):
footer[-1] = footer[-1][:-len("</p>")] \
+ ' ' + backlink + "</p>"
else:
footer.append("\n<p>%s</p>" % backlink)
footer.append('</li>')
footer.append('</ol>')
footer.append('</div>')
return text + '\n\n' + '\n'.join(footer)
else:
return text
# Ampersand-encoding based entirely on Nat Irons's Amputator MT plugin:
# http://bumppo.net/projects/amputator/
_ampersand_re = re.compile(r'&(?!#?[xX]?(?:[0-9a-fA-F]+|\w+);)')
_naked_lt_re = re.compile(r'<(?![a-z/?\$!])', re.I)
_naked_gt_re = re.compile(r'''(?<![a-z?!/'"-])>''', re.I)
def _encode_amps_and_angles(self, text):
# Smart processing for ampersands and angle brackets that need
# to be encoded.
text = self._ampersand_re.sub('&amp;', text)
# Encode naked <'s
text = self._naked_lt_re.sub('&lt;', text)
# Encode naked >'s
# Note: Other markdown implementations (e.g. Markdown.pl, PHP
# Markdown) don't do this.
text = self._naked_gt_re.sub('&gt;', text)
return text
def _encode_backslash_escapes(self, text):
for ch, escape in g_escape_table.items():
text = text.replace("\\"+ch, escape)
return text
_auto_link_re = re.compile(r'<((https?|ftp):[^\'">\s]+)>', re.I)
def _auto_link_sub(self, match):
g1 = match.group(1)
return '<a href="%s">%s</a>' % (g1, g1)
_auto_email_link_re = re.compile(r"""
<
(?:mailto:)?
(
[-.\w]+
\@
[-\w]+(\.[-\w]+)*\.[a-z]+
)
>
""", re.I | re.X | re.U)
def _auto_email_link_sub(self, match):
return self._encode_email_address(
self._unescape_special_chars(match.group(1)))
def _do_auto_links(self, text):
text = self._auto_link_re.sub(self._auto_link_sub, text)
text = self._auto_email_link_re.sub(self._auto_email_link_sub, text)
return text
def _encode_email_address(self, addr):
# Input: an email address, e.g. "foo@example.com"
#
# Output: the email address as a mailto link, with each character
# of the address encoded as either a decimal or hex entity, in
# the hopes of foiling most address harvesting spam bots. E.g.:
#
# <a href="&#109;&#97;&#105;&#108;&#116;&#111;:&#102;&#111;&#111;&#64;e
# xample.com">&#102;&#111;&#111;
# &#64;example.com</a>
#
# Based on a filter by Matthew Wickline, posted to the BBEdit-Talk
# mailing list: <http://tinyurl.com/yu7ue>
chars = [_xml_encode_email_char_at_random(ch)
for ch in "mailto:" + addr]
# Strip the mailto: from the visible part.
addr = '<a href="%s">%s</a>' \
% (''.join(chars), ''.join(chars[7:]))
return addr
def _do_link_patterns(self, text):
"""Caveat emptor: there isn't much guarding against link
patterns being formed inside other standard Markdown links, e.g.
inside a [link def][like this].
Dev Notes: *Could* consider prefixing regexes with a negative
lookbehind assertion to attempt to guard against this.
"""
link_from_hash = {}
for regex, repl in self.link_patterns:
replacements = []
for match in regex.finditer(text):
if hasattr(repl, "__call__"):
href = repl(match)
else:
href = match.expand(repl)
replacements.append((match.span(), href))
for (start, end), href in reversed(replacements):
escaped_href = (
href.replace('"', '&quot;') # b/c of attr quote
# To avoid markdown <em> and <strong>:
.replace('*', g_escape_table['*'])
.replace('_', g_escape_table['_']))
link = '<a href="%s">%s</a>' % (escaped_href, text[start:end])
hash = _hash_text(link)
link_from_hash[hash] = link
text = text[:start] + hash + text[end:]
for hash, link in link_from_hash.items():
text = text.replace(hash, link)
return text
def _unescape_special_chars(self, text):
# Swap back in all the special characters we've hidden.
for ch, hash in g_escape_table.items():
text = text.replace(hash, ch)
return text
def _outdent(self, text):
# Remove one level of line-leading tabs or spaces
return self._outdent_re.sub('', text)
class MarkdownWithExtras(Markdown):
"""A markdowner class that enables most extras:
- footnotes
- code-color (only has effect if 'pygments' Python module on path)
These are not included:
- pyshell (specific to Python-related documenting)
- code-friendly (because it *disables* part of the syntax)
- link-patterns (because you need to specify some actual
link-patterns anyway)
"""
extras = ["footnotes", "code-color"]
#---- internal support functions
# From http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52549
def _curry(*args, **kwargs):
function, args = args[0], args[1:]
def result(*rest, **kwrest):
combined = kwargs.copy()
combined.update(kwrest)
return function(*args + rest, **combined)
return result
# Recipe: regex_from_encoded_pattern (1.0)
def _regex_from_encoded_pattern(s):
"""'foo' -> re.compile(re.escape('foo'))
'/foo/' -> re.compile('foo')
'/foo/i' -> re.compile('foo', re.I)
"""
if s.startswith('/') and s.rfind('/') != 0:
# Parse it: /PATTERN/FLAGS
idx = s.rfind('/')
pattern, flags_str = s[1:idx], s[idx+1:]
flag_from_char = {
"i": re.IGNORECASE,
"l": re.LOCALE,
"s": re.DOTALL,
"m": re.MULTILINE,
"u": re.UNICODE,
}
flags = 0
for char in flags_str:
try:
flags |= flag_from_char[char]
except KeyError:
raise ValueError("unsupported regex flag: '%s' in '%s' "
"(must be one of '%s')"
% (char, s, ''.join(flag_from_char.keys())))
return re.compile(s[1:idx], flags)
else: # not an encoded regex
return re.compile(re.escape(s))
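# A minimal sketch (not part of the original source) exercising the encoded
# pattern syntax documented above; the name and inputs are illustrative:
def _demo_regex_from_encoded_pattern():
    assert _regex_from_encoded_pattern('/fo+/i').match('FOO')    # /PATTERN/FLAGS
    assert _regex_from_encoded_pattern('fo+').match('fo+')       # escaped literal
    assert not _regex_from_encoded_pattern('fo+').match('foo')   # '+' is literal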
# Recipe: dedent (0.1.2)
def _dedentlines(lines, tabsize=8, skip_first_line=False):
"""_dedentlines(lines, tabsize=8, skip_first_line=False) -> dedented lines
"lines" is a list of lines to dedent.
"tabsize" is the tab width to use for indent width calculations.
"skip_first_line" is a boolean indicating if the first line should
be skipped for calculating the indent width and for dedenting.
This is sometimes useful for docstrings and similar.
Same as dedent() except operates on a sequence of lines. Note: the
lines list is modified **in-place**.
"""
DEBUG = False
if DEBUG:
print "dedent: dedent(..., tabsize=%d, skip_first_line=%r)"\
% (tabsize, skip_first_line)
indents = []
margin = None
for i, line in enumerate(lines):
if i == 0 and skip_first_line: continue
indent = 0
for ch in line:
if ch == ' ':
indent += 1
elif ch == '\t':
indent += tabsize - (indent % tabsize)
elif ch in '\r\n':
continue # skip all-whitespace lines
else:
break
else:
continue # skip all-whitespace lines
if DEBUG: print "dedent: indent=%d: %r" % (indent, line)
if margin is None:
margin = indent
else:
margin = min(margin, indent)
if DEBUG: print "dedent: margin=%r" % margin
if margin is not None and margin > 0:
for i, line in enumerate(lines):
if i == 0 and skip_first_line: continue
removed = 0
for j, ch in enumerate(line):
if ch == ' ':
removed += 1
elif ch == '\t':
removed += tabsize - (removed % tabsize)
elif ch in '\r\n':
if DEBUG: print "dedent: %r: EOL -> strip up to EOL" % line
lines[i] = lines[i][j:]
break
else:
raise ValueError("unexpected non-whitespace char %r in "
"line %r while removing %d-space margin"
% (ch, line, margin))
if DEBUG:
print "dedent: %r: %r -> removed %d/%d"\
% (line, ch, removed, margin)
if removed == margin:
lines[i] = lines[i][j+1:]
break
elif removed > margin:
lines[i] = ' '*(removed-margin) + lines[i][j+1:]
break
else:
if removed:
lines[i] = lines[i][removed:]
return lines
def _dedent(text, tabsize=8, skip_first_line=False):
"""_dedent(text, tabsize=8, skip_first_line=False) -> dedented text
"text" is the text to dedent.
"tabsize" is the tab width to use for indent width calculations.
"skip_first_line" is a boolean indicating if the first line should
be skipped for calculating the indent width and for dedenting.
This is sometimes useful for docstrings and similar.
textwrap.dedent(s), but don't expand tabs to spaces
"""
lines = text.splitlines(1)
_dedentlines(lines, tabsize=tabsize, skip_first_line=skip_first_line)
return ''.join(lines)
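# Illustrative sketch (inputs assumed): the common leading margin is removed,
# and unlike textwrap.dedent, tabs are measured by width, not expanded.
def _demo_dedent():
    assert _dedent("    a\n      b\n") == "a\n  b\n"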
class _memoized(object):
"""Decorator that caches a function's return value each time it is called.
If called later with the same arguments, the cached value is returned, and
not re-evaluated.
http://wiki.python.org/moin/PythonDecoratorLibrary
"""
def __init__(self, func):
self.func = func
self.cache = {}
def __call__(self, *args):
try:
return self.cache[args]
except KeyError:
self.cache[args] = value = self.func(*args)
return value
except TypeError:
# uncachable -- for instance, passing a list as an argument.
# Better to not cache than to blow up entirely.
return self.func(*args)
def __repr__(self):
"""Return the function's docstring."""
return self.func.__doc__
def _xml_oneliner_re_from_tab_width(tab_width):
"""Standalone XML processing instruction regex."""
return re.compile(r"""
(?:
(?<=\n\n) # Starting after a blank line
| # or
\A\n? # the beginning of the doc
)
( # save in $1
[ ]{0,%d}
(?:
<\?\w+\b\s+.*?\?> # XML processing instruction
|
<\w+:\w+\b\s+.*?/> # namespaced single tag
)
[ \t]*
(?=\n{2,}|\Z) # followed by a blank line or end of document
)
""" % (tab_width - 1), re.X)
_xml_oneliner_re_from_tab_width = _memoized(_xml_oneliner_re_from_tab_width)
def _hr_tag_re_from_tab_width(tab_width):
return re.compile(r"""
(?:
(?<=\n\n) # Starting after a blank line
| # or
\A\n? # the beginning of the doc
)
( # save in \1
[ ]{0,%d}
<(hr) # start tag = \2
\b # word break
([^<>])*? #
/?> # the matching end tag
[ \t]*
(?=\n{2,}|\Z) # followed by a blank line or end of document
)
""" % (tab_width - 1), re.X)
_hr_tag_re_from_tab_width = _memoized(_hr_tag_re_from_tab_width)
def _xml_encode_email_char_at_random(ch):
r = random()
# Roughly 10% raw, 45% hex, 45% dec.
# '@' *must* be encoded. I [John Gruber] insist.
# Issue 26: '_' must be encoded.
if r > 0.9 and ch not in "@_":
return ch
elif r < 0.45:
# The [1:] is to drop leading '0': 0x63 -> x63
return '&#%s;' % hex(ord(ch))[1:]
else:
return '&#%s;' % ord(ch)
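# Small sketch (not in the original) of the guarantee above: '@' and '_' are
# always entity-encoded, whatever the random draw.
def _demo_encode_email_char():
    enc = _xml_encode_email_char_at_random('@')
    assert enc.startswith('&#') and enc.endswith(';')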
#---- mainline
class _NoReflowFormatter(optparse.IndentedHelpFormatter):
"""An optparse formatter that does NOT reflow the description."""
def format_description(self, description):
return description or ""
def _test():
import doctest
doctest.testmod()
def main(argv=None):
if argv is None:
argv = sys.argv
if not logging.root.handlers:
logging.basicConfig()
usage = "usage: %prog [PATHS...]"
version = "%prog "+__version__
parser = optparse.OptionParser(prog="markdown2", usage=usage,
version=version, description=cmdln_desc,
formatter=_NoReflowFormatter())
parser.add_option("-v", "--verbose", dest="log_level",
action="store_const", const=logging.DEBUG,
help="more verbose output")
parser.add_option("--encoding",
help="specify encoding of text content")
parser.add_option("--html4tags", action="store_true", default=False,
help="use HTML 4 style for empty element tags")
parser.add_option("-s", "--safe", metavar="MODE", dest="safe_mode",
help="sanitize literal HTML: 'escape' escapes "
"HTML meta chars, 'replace' replaces with an "
"[HTML_REMOVED] note")
parser.add_option("-x", "--extras", action="append",
help="Turn on specific extra features (not part of "
"the core Markdown spec). Supported values: "
"'code-friendly' disables _/__ for emphasis; "
"'code-color' adds code-block syntax coloring; "
"'link-patterns' adds auto-linking based on patterns; "
"'footnotes' adds the footnotes syntax;"
"'xml' passes one-liner processing instructions and namespaced XML tags;"
"'pyshell' to put unindented Python interactive shell sessions in a <code> block.")
parser.add_option("--use-file-vars",
help="Look for and use Emacs-style 'markdown-extras' "
"file var to turn on extras. See "
"<http://code.google.com/p/python-markdown2/wiki/Extras>.")
parser.add_option("--link-patterns-file",
help="path to a link pattern file")
parser.add_option("--self-test", action="store_true",
help="run internal self-tests (some doctests)")
parser.add_option("--compare", action="store_true",
help="run against Markdown.pl as well (for testing)")
parser.set_defaults(log_level=logging.INFO, compare=False,
encoding="utf-8", safe_mode=None, use_file_vars=False)
opts, paths = parser.parse_args()
log.setLevel(opts.log_level)
if opts.self_test:
return _test()
if opts.extras:
extras = {}
for s in opts.extras:
splitter = re.compile("[,;: ]+")
for e in splitter.split(s):
if '=' in e:
ename, earg = e.split('=', 1)
try:
earg = int(earg)
except ValueError:
pass
else:
ename, earg = e, None
extras[ename] = earg
else:
extras = None
if opts.link_patterns_file:
link_patterns = []
f = open(opts.link_patterns_file)
try:
for i, line in enumerate(f.readlines()):
if not line.strip(): continue
if line.lstrip().startswith("#"): continue
try:
pat, href = line.rstrip().rsplit(None, 1)
except ValueError:
raise MarkdownError("%s:%d: invalid link pattern line: %r"
% (opts.link_patterns_file, i+1, line))
link_patterns.append(
(_regex_from_encoded_pattern(pat), href))
finally:
f.close()
else:
link_patterns = None
from os.path import join, dirname, abspath, exists
markdown_pl = join(dirname(dirname(abspath(__file__))), "test",
"Markdown.pl")
for path in paths:
if opts.compare:
print "==== Markdown.pl ===="
perl_cmd = 'perl %s "%s"' % (markdown_pl, path)
o = os.popen(perl_cmd)
perl_html = o.read()
o.close()
sys.stdout.write(perl_html)
print "==== markdown2.py ===="
html = markdown_path(path, encoding=opts.encoding,
html4tags=opts.html4tags,
safe_mode=opts.safe_mode,
extras=extras, link_patterns=link_patterns,
use_file_vars=opts.use_file_vars)
sys.stdout.write(
html.encode(sys.stdout.encoding or "utf-8", 'xmlcharrefreplace'))
if opts.compare:
test_dir = join(dirname(dirname(abspath(__file__))), "test")
if exists(join(test_dir, "test_markdown2.py")):
sys.path.insert(0, test_dir)
from test_markdown2 import norm_html_from_html
norm_html = norm_html_from_html(html)
norm_perl_html = norm_html_from_html(perl_html)
else:
norm_html = html
norm_perl_html = perl_html
print "==== match? %r ====" % (norm_perl_html == norm_html)
if __name__ == "__main__":
sys.exit( main(sys.argv) )
| Python |
from markdown2 import *
from gluon.html import XML
def WIKI(text, encoding="utf8", safe_mode='escape', html4tags=False, **attributes):
if not text:
text = ''
if attributes.has_key('extras'):
extras = attributes['extras']
del attributes['extras']
else:
extras=None
text = text.decode(encoding,'replace')
return XML(markdown(text,extras=extras,
safe_mode=safe_mode, html4tags=html4tags)\
.encode(encoding,'xmlcharrefreplace'),**attributes)
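# Hypothetical usage sketch (assumes a web2py view context; the output shown
# is illustrative): WIKI("*hello*") returns an XML helper wrapping HTML such
# as '<p><em>hello</em></p>\n', safe to embed in a template without re-escaping.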
| Python |
# -*- coding: utf-8 -*-
# PyPyODBC is developed from RealPyODBC 0.1 beta released in 2004 by Michele Petrazzo. Thanks Michele.
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions
# of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
# CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import sys, os, datetime, ctypes, threading
from decimal import Decimal
try:
bytearray
except NameError:
# Python before version 2.6 does not have the bytearray type
bytearray = str
if not hasattr(ctypes, 'c_ssize_t'):
if ctypes.sizeof(ctypes.c_uint) == ctypes.sizeof(ctypes.c_void_p):
ctypes.c_ssize_t = ctypes.c_int
elif ctypes.sizeof(ctypes.c_ulong) == ctypes.sizeof(ctypes.c_void_p):
ctypes.c_ssize_t = ctypes.c_long
elif ctypes.sizeof(ctypes.c_ulonglong) == ctypes.sizeof(ctypes.c_void_p):
ctypes.c_ssize_t = ctypes.c_longlong
DEBUG = 0
# Comment out all "if DEBUG:" statements like below for production
if DEBUG: print 'DEBUGGING'
pooling = True
lock = threading.Lock()
shared_env_h = None
apilevel = '2.0'
paramstyle = 'qmark'
threadsafety = 1
version = '0.9.3'
lowercase=True
SQLWCHAR_SIZE = ctypes.sizeof(ctypes.c_wchar)
# Determine the size of Py_UNICODE
#sys.maxunicode > 65536 and 'UCS4' or 'UCS2'
UNICODE_SIZE = sys.maxunicode > 65536 and 4 or 2
# Define ODBC constants. They are widely used in ODBC documentation and programs
# They are defined in cpp header files: sql.h sqlext.h sqltypes.h sqlucode.h
# and you can get these files from the mingw32-runtime_3.13-1_all.deb package
SQL_ATTR_ODBC_VERSION, SQL_OV_ODBC2, SQL_OV_ODBC3 = 200, 2, 3
SQL_DRIVER_NOPROMPT = 0
SQL_ATTR_CONNECTION_POOLING = 201; SQL_CP_ONE_PER_HENV = 2
SQL_FETCH_NEXT, SQL_FETCH_FIRST, SQL_FETCH_LAST = 0x01, 0x02, 0x04
SQL_NULL_HANDLE, SQL_HANDLE_ENV, SQL_HANDLE_DBC, SQL_HANDLE_STMT = 0, 1, 2, 3
SQL_SUCCESS, SQL_SUCCESS_WITH_INFO = 0, 1
SQL_NO_DATA = 100; SQL_NO_TOTAL = -4
SQL_ATTR_ACCESS_MODE = SQL_ACCESS_MODE = 101
SQL_ATTR_AUTOCOMMIT = SQL_AUTOCOMMIT = 102
SQL_MODE_DEFAULT = SQL_MODE_READ_WRITE = 0; SQL_MODE_READ_ONLY = 1
SQL_AUTOCOMMIT_OFF, SQL_AUTOCOMMIT_ON = 0, 1
SQL_IS_UINTEGER = -5
SQL_ATTR_LOGIN_TIMEOUT = 103; SQL_ATTR_CONNECTION_TIMEOUT = 113
SQL_COMMIT, SQL_ROLLBACK = 0, 1
SQL_INDEX_UNIQUE,SQL_INDEX_ALL = 0,1
SQL_QUICK,SQL_ENSURE = 0,1
SQL_FETCH_NEXT = 1
SQL_COLUMN_DISPLAY_SIZE = 6
SQL_INVALID_HANDLE = -2
SQL_NO_DATA_FOUND = 100; SQL_NULL_DATA = -1; SQL_NTS = -3
SQL_HANDLE_DESCR = 4
SQL_TABLE_NAMES = 3
SQL_PARAM_INPUT = 1; SQL_PARAM_INPUT_OUTPUT = 2
SQL_PARAM_TYPE_UNKNOWN = 0
SQL_RESULT_COL = 3
SQL_PARAM_OUTPUT = 4
SQL_RETURN_VALUE = 5
SQL_PARAM_TYPE_DEFAULT = SQL_PARAM_INPUT_OUTPUT
SQL_RESET_PARAMS = 3
SQL_UNBIND = 2
SQL_CLOSE = 0
SQL_TYPE_NULL = 0
SQL_DECIMAL = 3
SQL_FLOAT = 6
SQL_DATE = 9
SQL_TIME = 10
SQL_TIMESTAMP = 11
SQL_VARCHAR = 12
SQL_LONGVARCHAR = -1
SQL_VARBINARY = -3
SQL_LONGVARBINARY = -4
SQL_BIGINT = -5
SQL_WVARCHAR = -9
SQL_WLONGVARCHAR = -10
SQL_ALL_TYPES = 0
SQL_SIGNED_OFFSET = -20
SQL_C_CHAR = SQL_CHAR = 1
SQL_C_NUMERIC = SQL_NUMERIC = 2
SQL_C_LONG = SQL_INTEGER = 4
SQL_C_SLONG = SQL_C_LONG + SQL_SIGNED_OFFSET
SQL_C_SHORT = SQL_SMALLINT = 5
SQL_C_FLOAT = SQL_REAL = 7
SQL_C_DOUBLE = SQL_DOUBLE = 8
SQL_C_TYPE_DATE = SQL_TYPE_DATE = 91
SQL_C_TYPE_TIME = SQL_TYPE_TIME = 92
SQL_C_BINARY = SQL_BINARY = -2
SQL_C_SBIGINT = SQL_BIGINT + SQL_SIGNED_OFFSET
SQL_C_TINYINT = SQL_TINYINT = -6
SQL_C_BIT = SQL_BIT = -7
SQL_C_WCHAR = SQL_WCHAR = -8
SQL_C_GUID = SQL_GUID = -11
SQL_C_TYPE_TIMESTAMP = SQL_TYPE_TIMESTAMP = 93
SQL_C_DEFAULT = 99
SQL_SS_TIME2 = -154
SQL_DESC_DISPLAY_SIZE = SQL_COLUMN_DISPLAY_SIZE
def dttm_cvt(x):
if x == '': return None
else: return datetime.datetime(int(x[0:4]),int(x[5:7]),int(x[8:10]),int(x[10:13]),int(x[14:16]),int(x[17:19]),int(x[20:].ljust(6,'0')))
def tm_cvt(x):
if x == '': return None
else: return datetime.time(int(x[0:2]),int(x[3:5]),int(x[6:8]),int(x[9:].ljust(6,'0')))
def dt_cvt(x):
if x == '': return None
else: return datetime.date(int(x[0:4]),int(x[5:7]),int(x[8:10]))
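# Minimal sketch (sample buffer contents assumed) of the converters above,
# which parse the fixed-layout character buffers ODBC returns:
def _demo_cvt():
    assert dt_cvt('2009-07-21') == datetime.date(2009, 7, 21)
    assert tm_cvt('13:45:05') == datetime.time(13, 45, 5)
    assert dttm_cvt('2009-07-21 13:45:05.123') == \
        datetime.datetime(2009, 7, 21, 13, 45, 5, 123000)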
# Below defines the constants for the SQLGetInfo method and their corresponding return types
SQL_QUALIFIER_LOCATION = 114
SQL_QUALIFIER_NAME_SEPARATOR = 41
SQL_QUALIFIER_TERM = 42
SQL_QUALIFIER_USAGE = 92
SQL_OWNER_TERM = 39
SQL_OWNER_USAGE = 91
SQL_ACCESSIBLE_PROCEDURES = 20
SQL_ACCESSIBLE_TABLES = 19
SQL_ACTIVE_ENVIRONMENTS = 116
SQL_AGGREGATE_FUNCTIONS = 169
SQL_ALTER_DOMAIN = 117
SQL_ALTER_TABLE = 86
SQL_ASYNC_MODE = 10021
SQL_BATCH_ROW_COUNT = 120
SQL_BATCH_SUPPORT = 121
SQL_BOOKMARK_PERSISTENCE = 82
SQL_CATALOG_LOCATION = SQL_QUALIFIER_LOCATION
SQL_CATALOG_NAME = 10003
SQL_CATALOG_NAME_SEPARATOR = SQL_QUALIFIER_NAME_SEPARATOR
SQL_CATALOG_TERM = SQL_QUALIFIER_TERM
SQL_CATALOG_USAGE = SQL_QUALIFIER_USAGE
SQL_COLLATION_SEQ = 10004
SQL_COLUMN_ALIAS = 87
SQL_CONCAT_NULL_BEHAVIOR = 22
SQL_CONVERT_FUNCTIONS = 48
SQL_CONVERT_VARCHAR = 70
SQL_CORRELATION_NAME = 74
SQL_CREATE_ASSERTION = 127
SQL_CREATE_CHARACTER_SET = 128
SQL_CREATE_COLLATION = 129
SQL_CREATE_DOMAIN = 130
SQL_CREATE_SCHEMA = 131
SQL_CREATE_TABLE = 132
SQL_CREATE_TRANSLATION = 133
SQL_CREATE_VIEW = 134
SQL_CURSOR_COMMIT_BEHAVIOR = 23
SQL_CURSOR_ROLLBACK_BEHAVIOR = 24
SQL_DATABASE_NAME = 16
SQL_DATA_SOURCE_NAME = 2
SQL_DATA_SOURCE_READ_ONLY = 25
SQL_DATETIME_LITERALS = 119
SQL_DBMS_NAME = 17
SQL_DBMS_VER = 18
SQL_DDL_INDEX = 170
SQL_DEFAULT_TXN_ISOLATION = 26
SQL_DESCRIBE_PARAMETER = 10002
SQL_DM_VER = 171
SQL_DRIVER_NAME = 6
SQL_DRIVER_ODBC_VER = 77
SQL_DRIVER_VER = 7
SQL_DROP_ASSERTION = 136
SQL_DROP_CHARACTER_SET = 137
SQL_DROP_COLLATION = 138
SQL_DROP_DOMAIN = 139
SQL_DROP_SCHEMA = 140
SQL_DROP_TABLE = 141
SQL_DROP_TRANSLATION = 142
SQL_DROP_VIEW = 143
SQL_DYNAMIC_CURSOR_ATTRIBUTES1 = 144
SQL_DYNAMIC_CURSOR_ATTRIBUTES2 = 145
SQL_EXPRESSIONS_IN_ORDERBY = 27
SQL_FILE_USAGE = 84
SQL_FORWARD_ONLY_CURSOR_ATTRIBUTES1 = 146
SQL_FORWARD_ONLY_CURSOR_ATTRIBUTES2 = 147
SQL_GETDATA_EXTENSIONS = 81
SQL_GROUP_BY = 88
SQL_IDENTIFIER_CASE = 28
SQL_IDENTIFIER_QUOTE_CHAR = 29
SQL_INDEX_KEYWORDS = 148
SQL_INFO_SCHEMA_VIEWS = 149
SQL_INSERT_STATEMENT = 172
SQL_INTEGRITY = 73
SQL_KEYSET_CURSOR_ATTRIBUTES1 = 150
SQL_KEYSET_CURSOR_ATTRIBUTES2 = 151
SQL_KEYWORDS = 89
SQL_LIKE_ESCAPE_CLAUSE = 113
SQL_MAX_ASYNC_CONCURRENT_STATEMENTS = 10022
SQL_MAX_BINARY_LITERAL_LEN = 112
SQL_MAX_CATALOG_NAME_LEN = 34
SQL_MAX_CHAR_LITERAL_LEN = 108
SQL_MAX_COLUMNS_IN_GROUP_BY = 97
SQL_MAX_COLUMNS_IN_INDEX = 98
SQL_MAX_COLUMNS_IN_ORDER_BY = 99
SQL_MAX_COLUMNS_IN_SELECT = 100
SQL_MAX_COLUMNS_IN_TABLE = 101
SQL_MAX_COLUMN_NAME_LEN = 30
SQL_MAX_CONCURRENT_ACTIVITIES = 1
SQL_MAX_CURSOR_NAME_LEN = 31
SQL_MAX_DRIVER_CONNECTIONS = 0
SQL_MAX_IDENTIFIER_LEN = 10005
SQL_MAX_INDEX_SIZE = 102
SQL_MAX_PROCEDURE_NAME_LEN = 33
SQL_MAX_ROW_SIZE = 104
SQL_MAX_ROW_SIZE_INCLUDES_LONG = 103
SQL_MAX_SCHEMA_NAME_LEN = 32
SQL_MAX_STATEMENT_LEN = 105
SQL_MAX_TABLES_IN_SELECT = 106
SQL_MAX_TABLE_NAME_LEN = 35
SQL_MAX_USER_NAME_LEN = 107
SQL_MULTIPLE_ACTIVE_TXN = 37
SQL_MULT_RESULT_SETS = 36
SQL_NEED_LONG_DATA_LEN = 111
SQL_NON_NULLABLE_COLUMNS = 75
SQL_NULL_COLLATION = 85
SQL_NUMERIC_FUNCTIONS = 49
SQL_ODBC_INTERFACE_CONFORMANCE = 152
SQL_ODBC_VER = 10
SQL_OJ_CAPABILITIES = 65003
SQL_ORDER_BY_COLUMNS_IN_SELECT = 90
SQL_PARAM_ARRAY_ROW_COUNTS = 153
SQL_PARAM_ARRAY_SELECTS = 154
SQL_PROCEDURES = 21
SQL_PROCEDURE_TERM = 40
SQL_QUOTED_IDENTIFIER_CASE = 93
SQL_ROW_UPDATES = 11
SQL_SCHEMA_TERM = SQL_OWNER_TERM
SQL_SCHEMA_USAGE = SQL_OWNER_USAGE
SQL_SCROLL_OPTIONS = 44
SQL_SEARCH_PATTERN_ESCAPE = 14
SQL_SERVER_NAME = 13
SQL_SPECIAL_CHARACTERS = 94
SQL_SQL92_DATETIME_FUNCTIONS = 155
SQL_SQL92_FOREIGN_KEY_DELETE_RULE = 156
SQL_SQL92_FOREIGN_KEY_UPDATE_RULE = 157
SQL_SQL92_GRANT = 158
SQL_SQL92_NUMERIC_VALUE_FUNCTIONS = 159
SQL_SQL92_PREDICATES = 160
SQL_SQL92_RELATIONAL_JOIN_OPERATORS = 161
SQL_SQL92_REVOKE = 162
SQL_SQL92_ROW_VALUE_CONSTRUCTOR = 163
SQL_SQL92_STRING_FUNCTIONS = 164
SQL_SQL92_VALUE_EXPRESSIONS = 165
SQL_SQL_CONFORMANCE = 118
SQL_STANDARD_CLI_CONFORMANCE = 166
SQL_STATIC_CURSOR_ATTRIBUTES1 = 167
SQL_STATIC_CURSOR_ATTRIBUTES2 = 168
SQL_STRING_FUNCTIONS = 50
SQL_SUBQUERIES = 95
SQL_SYSTEM_FUNCTIONS = 51
SQL_TABLE_TERM = 45
SQL_TIMEDATE_ADD_INTERVALS = 109
SQL_TIMEDATE_DIFF_INTERVALS = 110
SQL_TIMEDATE_FUNCTIONS = 52
SQL_TXN_CAPABLE = 46
SQL_TXN_ISOLATION_OPTION = 72
SQL_UNION = 96
SQL_USER_NAME = 47
SQL_XOPEN_CLI_YEAR = 10000
aInfoTypes = {
SQL_ACCESSIBLE_PROCEDURES : 'GI_YESNO',SQL_ACCESSIBLE_TABLES : 'GI_YESNO',SQL_ACTIVE_ENVIRONMENTS : 'GI_USMALLINT',
SQL_AGGREGATE_FUNCTIONS : 'GI_UINTEGER',SQL_ALTER_DOMAIN : 'GI_UINTEGER',
SQL_ALTER_TABLE : 'GI_UINTEGER',SQL_ASYNC_MODE : 'GI_UINTEGER',SQL_BATCH_ROW_COUNT : 'GI_UINTEGER',
SQL_BATCH_SUPPORT : 'GI_UINTEGER',SQL_BOOKMARK_PERSISTENCE : 'GI_UINTEGER',SQL_CATALOG_LOCATION : 'GI_USMALLINT',
SQL_CATALOG_NAME : 'GI_YESNO',SQL_CATALOG_NAME_SEPARATOR : 'GI_STRING',SQL_CATALOG_TERM : 'GI_STRING',
SQL_CATALOG_USAGE : 'GI_UINTEGER',SQL_COLLATION_SEQ : 'GI_STRING',SQL_COLUMN_ALIAS : 'GI_YESNO',
SQL_CONCAT_NULL_BEHAVIOR : 'GI_USMALLINT',SQL_CONVERT_FUNCTIONS : 'GI_UINTEGER',
SQL_CONVERT_VARCHAR : 'GI_UINTEGER',SQL_CORRELATION_NAME : 'GI_USMALLINT',
SQL_CREATE_ASSERTION : 'GI_UINTEGER',SQL_CREATE_CHARACTER_SET : 'GI_UINTEGER',
SQL_CREATE_COLLATION : 'GI_UINTEGER',SQL_CREATE_DOMAIN : 'GI_UINTEGER',SQL_CREATE_SCHEMA : 'GI_UINTEGER',
SQL_CREATE_TABLE : 'GI_UINTEGER',SQL_CREATE_TRANSLATION : 'GI_UINTEGER',SQL_CREATE_VIEW : 'GI_UINTEGER',
SQL_CURSOR_COMMIT_BEHAVIOR : 'GI_USMALLINT',SQL_CURSOR_ROLLBACK_BEHAVIOR : 'GI_USMALLINT',SQL_DATABASE_NAME : 'GI_STRING',
SQL_DATA_SOURCE_NAME : 'GI_STRING',SQL_DATA_SOURCE_READ_ONLY : 'GI_YESNO',SQL_DATETIME_LITERALS : 'GI_UINTEGER',
SQL_DBMS_NAME : 'GI_STRING',SQL_DBMS_VER : 'GI_STRING',SQL_DDL_INDEX : 'GI_UINTEGER',
SQL_DEFAULT_TXN_ISOLATION : 'GI_UINTEGER',SQL_DESCRIBE_PARAMETER : 'GI_YESNO',SQL_DM_VER : 'GI_STRING',
SQL_DRIVER_NAME : 'GI_STRING',SQL_DRIVER_ODBC_VER : 'GI_STRING',SQL_DRIVER_VER : 'GI_STRING',
SQL_DROP_ASSERTION : 'GI_UINTEGER',SQL_DROP_CHARACTER_SET : 'GI_UINTEGER',
SQL_DROP_COLLATION : 'GI_UINTEGER',SQL_DROP_DOMAIN : 'GI_UINTEGER',
SQL_DROP_SCHEMA : 'GI_UINTEGER',SQL_DROP_TABLE : 'GI_UINTEGER',SQL_DROP_TRANSLATION : 'GI_UINTEGER',
SQL_DROP_VIEW : 'GI_UINTEGER',SQL_DYNAMIC_CURSOR_ATTRIBUTES1 : 'GI_UINTEGER',SQL_DYNAMIC_CURSOR_ATTRIBUTES2 : 'GI_UINTEGER',
SQL_EXPRESSIONS_IN_ORDERBY : 'GI_YESNO',SQL_FILE_USAGE : 'GI_USMALLINT',
SQL_FORWARD_ONLY_CURSOR_ATTRIBUTES1 : 'GI_UINTEGER',SQL_FORWARD_ONLY_CURSOR_ATTRIBUTES2 : 'GI_UINTEGER',
SQL_GETDATA_EXTENSIONS : 'GI_UINTEGER',SQL_GROUP_BY : 'GI_USMALLINT',SQL_IDENTIFIER_CASE : 'GI_USMALLINT',
SQL_IDENTIFIER_QUOTE_CHAR : 'GI_STRING',SQL_INDEX_KEYWORDS : 'GI_UINTEGER',SQL_INFO_SCHEMA_VIEWS : 'GI_UINTEGER',
SQL_INSERT_STATEMENT : 'GI_UINTEGER',SQL_INTEGRITY : 'GI_YESNO',SQL_KEYSET_CURSOR_ATTRIBUTES1 : 'GI_UINTEGER',
SQL_KEYSET_CURSOR_ATTRIBUTES2 : 'GI_UINTEGER',SQL_KEYWORDS : 'GI_STRING',
SQL_LIKE_ESCAPE_CLAUSE : 'GI_YESNO',SQL_MAX_ASYNC_CONCURRENT_STATEMENTS : 'GI_UINTEGER',
SQL_MAX_BINARY_LITERAL_LEN : 'GI_UINTEGER',SQL_MAX_CATALOG_NAME_LEN : 'GI_USMALLINT',
SQL_MAX_CHAR_LITERAL_LEN : 'GI_UINTEGER',SQL_MAX_COLUMNS_IN_GROUP_BY : 'GI_USMALLINT',
SQL_MAX_COLUMNS_IN_INDEX : 'GI_USMALLINT',SQL_MAX_COLUMNS_IN_ORDER_BY : 'GI_USMALLINT',
SQL_MAX_COLUMNS_IN_SELECT : 'GI_USMALLINT',SQL_MAX_COLUMNS_IN_TABLE : 'GI_USMALLINT',
SQL_MAX_COLUMN_NAME_LEN : 'GI_USMALLINT',SQL_MAX_CONCURRENT_ACTIVITIES : 'GI_USMALLINT',
SQL_MAX_CURSOR_NAME_LEN : 'GI_USMALLINT',SQL_MAX_DRIVER_CONNECTIONS : 'GI_USMALLINT',
SQL_MAX_IDENTIFIER_LEN : 'GI_USMALLINT',SQL_MAX_INDEX_SIZE : 'GI_UINTEGER',
SQL_MAX_PROCEDURE_NAME_LEN : 'GI_USMALLINT',SQL_MAX_ROW_SIZE : 'GI_UINTEGER',
SQL_MAX_ROW_SIZE_INCLUDES_LONG : 'GI_YESNO',SQL_MAX_SCHEMA_NAME_LEN : 'GI_USMALLINT',
SQL_MAX_STATEMENT_LEN : 'GI_UINTEGER',SQL_MAX_TABLES_IN_SELECT : 'GI_USMALLINT',
SQL_MAX_TABLE_NAME_LEN : 'GI_USMALLINT',SQL_MAX_USER_NAME_LEN : 'GI_USMALLINT',
SQL_MULTIPLE_ACTIVE_TXN : 'GI_YESNO',SQL_MULT_RESULT_SETS : 'GI_YESNO',
SQL_NEED_LONG_DATA_LEN : 'GI_YESNO',SQL_NON_NULLABLE_COLUMNS : 'GI_USMALLINT',
SQL_NULL_COLLATION : 'GI_USMALLINT',SQL_NUMERIC_FUNCTIONS : 'GI_UINTEGER',
SQL_ODBC_INTERFACE_CONFORMANCE : 'GI_UINTEGER',SQL_ODBC_VER : 'GI_STRING',SQL_OJ_CAPABILITIES : 'GI_UINTEGER',
SQL_ORDER_BY_COLUMNS_IN_SELECT : 'GI_YESNO',SQL_PARAM_ARRAY_ROW_COUNTS : 'GI_UINTEGER',
SQL_PARAM_ARRAY_SELECTS : 'GI_UINTEGER',SQL_PROCEDURES : 'GI_YESNO',SQL_PROCEDURE_TERM : 'GI_STRING',
SQL_QUOTED_IDENTIFIER_CASE : 'GI_USMALLINT',SQL_ROW_UPDATES : 'GI_YESNO',SQL_SCHEMA_TERM : 'GI_STRING',
SQL_SCHEMA_USAGE : 'GI_UINTEGER',SQL_SCROLL_OPTIONS : 'GI_UINTEGER',SQL_SEARCH_PATTERN_ESCAPE : 'GI_STRING',
SQL_SERVER_NAME : 'GI_STRING',SQL_SPECIAL_CHARACTERS : 'GI_STRING',SQL_SQL92_DATETIME_FUNCTIONS : 'GI_UINTEGER',
SQL_SQL92_FOREIGN_KEY_DELETE_RULE : 'GI_UINTEGER',SQL_SQL92_FOREIGN_KEY_UPDATE_RULE : 'GI_UINTEGER',
SQL_SQL92_GRANT : 'GI_UINTEGER',SQL_SQL92_NUMERIC_VALUE_FUNCTIONS : 'GI_UINTEGER',
SQL_SQL92_PREDICATES : 'GI_UINTEGER',SQL_SQL92_RELATIONAL_JOIN_OPERATORS : 'GI_UINTEGER',
SQL_SQL92_REVOKE : 'GI_UINTEGER',SQL_SQL92_ROW_VALUE_CONSTRUCTOR : 'GI_UINTEGER',
SQL_SQL92_STRING_FUNCTIONS : 'GI_UINTEGER',SQL_SQL92_VALUE_EXPRESSIONS : 'GI_UINTEGER',
SQL_SQL_CONFORMANCE : 'GI_UINTEGER',SQL_STANDARD_CLI_CONFORMANCE : 'GI_UINTEGER',
SQL_STATIC_CURSOR_ATTRIBUTES1 : 'GI_UINTEGER',SQL_STATIC_CURSOR_ATTRIBUTES2 : 'GI_UINTEGER',
SQL_STRING_FUNCTIONS : 'GI_UINTEGER',SQL_SUBQUERIES : 'GI_UINTEGER',
SQL_SYSTEM_FUNCTIONS : 'GI_UINTEGER',SQL_TABLE_TERM : 'GI_STRING',SQL_TIMEDATE_ADD_INTERVALS : 'GI_UINTEGER',
SQL_TIMEDATE_DIFF_INTERVALS : 'GI_UINTEGER',SQL_TIMEDATE_FUNCTIONS : 'GI_UINTEGER',
SQL_TXN_CAPABLE : 'GI_USMALLINT',SQL_TXN_ISOLATION_OPTION : 'GI_UINTEGER',
SQL_UNION : 'GI_UINTEGER',SQL_USER_NAME : 'GI_STRING',SQL_XOPEN_CLI_YEAR : 'GI_STRING',
}
# Definitions for types
BINARY = bytearray
Binary = bytearray
DATETIME = datetime.datetime
Date = datetime.date
Time = datetime.time
Timestamp = datetime.datetime
STRING = str
NUMBER = float
ROWID = int
DateFromTicks = datetime.date.fromtimestamp
TimeFromTicks = lambda x: datetime.datetime.fromtimestamp(x).time()
TimestampFromTicks = datetime.datetime.fromtimestamp
#Define exceptions
class OdbcNoLibrary(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class OdbcLibraryError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class OdbcInvalidHandle(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class OdbcGenericError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class Warning(StandardError):
def __init__(self, error_code, error_desc):
self.value = (error_code, error_desc)
self.args = (error_code, error_desc)
class Error(StandardError):
def __init__(self, error_code, error_desc):
self.value = (error_code, error_desc)
self.args = (error_code, error_desc)
class InterfaceError(Error):
def __init__(self, error_code, error_desc):
self.value = (error_code, error_desc)
self.args = (error_code, error_desc)
class DatabaseError(Error):
def __init__(self, error_code, error_desc):
self.value = (error_code, error_desc)
self.args = (error_code, error_desc)
class InternalError(DatabaseError):
def __init__(self, error_code, error_desc):
self.value = (error_code, error_desc)
self.args = (error_code, error_desc)
class ProgrammingError(DatabaseError):
def __init__(self, error_code, error_desc):
self.value = (error_code, error_desc)
self.args = (error_code, error_desc)
class DataError(DatabaseError):
def __init__(self, error_code, error_desc):
self.value = (error_code, error_desc)
self.args = (error_code, error_desc)
class IntegrityError(DatabaseError):
def __init__(self, error_code, error_desc):
self.value = (error_code, error_desc)
self.args = (error_code, error_desc)
class NotSupportedError(Error):
def __init__(self, error_code, error_desc):
self.value = (error_code, error_desc)
self.args = (error_code, error_desc)
class OperationalError(DatabaseError):
def __init__(self, error_code, error_desc):
self.value = (error_code, error_desc)
self.args = (error_code, error_desc)
# Get the References of the platform's ODBC functions via ctypes
if sys.platform in ('win32','cli'):
ODBC_API = ctypes.windll.odbc32
# On Windows, the size of SQLWCHAR is hardcoded to 2-bytes.
SQLWCHAR_SIZE = ctypes.sizeof(ctypes.c_ushort)
else:
# Try to load the library on Linux
try:
# First try direct loading libodbc.so
ODBC_API = ctypes.cdll.LoadLibrary('libodbc.so')
except:
# If loading libodbc.so directly failed,
# try locating it with ctypes' find_library
from ctypes.util import find_library
library = find_library('odbc')
if library is None:
# If find_library still can not find the library
# we try finding it manually from where libodbc.so usually appears
lib_paths = ("/usr/lib/libodbc.so","/usr/lib/i386-linux-gnu/libodbc.so","/usr/lib/x86_64-linux-gnu/libodbc.so")
lib_paths = [path for path in lib_paths if os.path.exists(path)]
if len(lib_paths) == 0 :
raise OdbcNoLibrary('ODBC Library is not found')
else:
library = lib_paths[0]
# Then we try loading the found libodbc.so again
try:
ODBC_API = ctypes.cdll.LoadLibrary(library)
except:
# If still fail loading, abort.
raise OdbcLibraryError('Error while loading %s' % library)
# unixODBC defaults to 2-bytes SQLWCHAR, unless "-DSQL_WCHART_CONVERT" was
# added to CFLAGS, in which case it will be the size of wchar_t.
# Note that using 4-bytes SQLWCHAR will break most ODBC drivers, as driver
# development mostly targets the Windows platform.
import commands
status, output = commands.getstatusoutput('odbc_config --cflags')
if status == 0 and 'SQL_WCHART_CONVERT' in output:
SQLWCHAR_SIZE = ctypes.sizeof(ctypes.c_wchar)
else:
SQLWCHAR_SIZE = ctypes.sizeof(ctypes.c_ushort)
create_buffer_u = ctypes.create_unicode_buffer
create_buffer = ctypes.create_string_buffer
wchar_type = ctypes.c_wchar_p
to_unicode = lambda s: s
from_buffer_u = lambda buffer: buffer.value
# This is the common case on Linux, which uses wide Python build together with
# the default unixODBC without the "-DSQL_WCHART_CONVERT" CFLAGS.
if UNICODE_SIZE > SQLWCHAR_SIZE:
# We can only use unicode buffer if the size of wchar_t (UNICODE_SIZE) is
# the same as the size expected by the driver manager (SQLWCHAR_SIZE).
create_buffer_u = create_buffer
wchar_type = ctypes.c_char_p
def to_unicode(s):
return s.encode('UTF-16LE')
def from_buffer_u(buffer):
i = 0
uchars = []
while True:
uchar = buffer.raw[i:i + 2].decode('UTF-16')
if uchar == u'\x00':
break
uchars.append(uchar)
i += 2
return ''.join(uchars)
# Esoteric case; don't really care.
elif UNICODE_SIZE < SQLWCHAR_SIZE:
raise OdbcLibraryError('Using narrow Python build with ODBC library '
'expecting wide unicode is not supported.')
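# Round-trip sketch (only meaningful when the wide-build branch above was
# taken, i.e. to_unicode/from_buffer_u operate on UTF-16LE bytes):
def _demo_from_buffer_u():
    buf = create_buffer('h\x00i\x00\x00\x00')  # 'hi' in UTF-16LE, NUL-terminated
    return from_buffer_u(buf)  # -> u'hi' on that branch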
# Below Datatype mappings referenced the document at
# http://infocenter.sybase.com/help/index.jsp?topic=/com.sybase.help.sdk_12.5.1.aseodbc/html/aseodbc/CACFDIGH.htm
SQL_data_type_dict = { \
#SQL Data TYPE 0.Python Data Type 1.Default Output Converter 2.Buffer Type 3.Buffer Allocator 4.Default Buffer Size
SQL_TYPE_NULL : (None, lambda x: None, SQL_C_CHAR, create_buffer, 2 ),
SQL_CHAR : (str, lambda x: x, SQL_C_CHAR, create_buffer, 2048 ),
SQL_NUMERIC : (Decimal, Decimal, SQL_C_CHAR, create_buffer, 150 ),
SQL_DECIMAL : (Decimal, Decimal, SQL_C_CHAR, create_buffer, 150 ),
SQL_INTEGER : (int, int, SQL_C_CHAR, create_buffer, 150 ),
SQL_SMALLINT : (int, int, SQL_C_CHAR, create_buffer, 150 ),
SQL_FLOAT : (float, float, SQL_C_CHAR, create_buffer, 150 ),
SQL_REAL : (float, float, SQL_C_CHAR, create_buffer, 150 ),
SQL_DOUBLE : (float, float, SQL_C_CHAR, create_buffer, 200 ),
SQL_DATE : (datetime.date, dt_cvt, SQL_C_CHAR , create_buffer, 30 ),
SQL_TIME : (datetime.time, tm_cvt, SQL_C_CHAR, create_buffer, 20 ),
SQL_SS_TIME2 : (datetime.time, tm_cvt, SQL_C_CHAR, create_buffer, 20 ),
SQL_TIMESTAMP : (datetime.datetime, dttm_cvt, SQL_C_CHAR, create_buffer, 30 ),
SQL_VARCHAR : (str, lambda x: x, SQL_C_CHAR, create_buffer, 2048 ),
SQL_LONGVARCHAR : (str, lambda x: x, SQL_C_CHAR, create_buffer, 20500 ),
SQL_BINARY : (bytearray, bytearray, SQL_C_BINARY, create_buffer, 5120 ),
SQL_VARBINARY : (bytearray, bytearray, SQL_C_BINARY, create_buffer, 5120 ),
SQL_LONGVARBINARY : (bytearray, bytearray, SQL_C_BINARY, create_buffer, 20500 ),
SQL_BIGINT : (long, long, SQL_C_CHAR, create_buffer, 150 ),
SQL_TINYINT : (int, int, SQL_C_CHAR, create_buffer, 150 ),
SQL_BIT : (bool, lambda x:x=='1', SQL_C_CHAR, create_buffer, 2 ),
SQL_WCHAR : (unicode, lambda x: x, SQL_C_WCHAR, create_buffer_u, 2048 ),
SQL_WVARCHAR : (unicode, lambda x: x, SQL_C_WCHAR, create_buffer_u, 2048 ),
SQL_GUID : (str, str, SQL_C_CHAR, create_buffer, 50 ),
SQL_WLONGVARCHAR : (unicode, lambda x: x, SQL_C_WCHAR, create_buffer_u, 20500 ),
SQL_TYPE_DATE : (datetime.date, dt_cvt, SQL_C_CHAR, create_buffer, 30 ),
SQL_TYPE_TIME : (datetime.time, tm_cvt, SQL_C_CHAR, create_buffer, 20 ),
SQL_TYPE_TIMESTAMP : (datetime.datetime, dttm_cvt, SQL_C_CHAR, create_buffer, 30 ),
}
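# Illustrative reading of one mapping row (values taken from the table above):
def _demo_type_row():
    py_type, cvt, c_type, alloc, size = SQL_data_type_dict[SQL_VARCHAR]
    buf = alloc(size)  # the 2048-byte fetch buffer for this column type
    return py_type is str and cvt('abc') == 'abc' and c_type == SQL_C_CHAR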
"""
Types mapping, applicable for 32-bit and 64-bit Linux / Windows / Mac OS X.
SQLPointer -> ctypes.c_void_p
SQLCHAR * -> ctypes.c_char_p
SQLWCHAR * -> ctypes.c_wchar_p on Windows, ctypes.c_char_p with unixODBC
SQLINT -> ctypes.c_int
SQLSMALLINT -> ctypes.c_short
SQMUSMALLINT -> ctypes.c_ushort
SQLLEN -> ctypes.c_ssize_t
SQLULEN -> ctypes.c_size_t
SQLRETURN -> ctypes.c_short
"""
# Define the Python return type for ODBC functions that return a result code.
funcs_with_ret = [
"SQLAllocHandle",
"SQLBindParameter",
"SQLCloseCursor",
"SQLColAttribute",
"SQLColumns",
"SQLColumnsW",
"SQLConnect",
"SQLConnectW",
"SQLDataSources",
"SQLDataSourcesW",
"SQLDescribeCol",
"SQLDescribeColW",
"SQLDescribeParam",
"SQLDisconnect",
"SQLDriverConnect",
"SQLDriverConnectW",
"SQLEndTran",
"SQLExecDirect",
"SQLExecDirectW",
"SQLExecute",
"SQLFetch",
"SQLFetchScroll",
"SQLForeignKeys",
"SQLForeignKeysW",
"SQLFreeHandle",
"SQLFreeStmt",
"SQLGetData",
"SQLGetDiagRec",
"SQLGetInfo",
"SQLGetTypeInfo",
"SQLMoreResults",
"SQLNumParams",
"SQLNumResultCols",
"SQLPrepare",
"SQLPrepareW",
"SQLPrimaryKeys",
"SQLPrimaryKeysW",
"SQLProcedureColumns",
"SQLProcedureColumnsW",
"SQLProcedures",
"SQLProceduresW",
"SQLRowCount",
"SQLSetConnectAttr",
"SQLSetEnvAttr",
"SQLStatistics",
"SQLStatisticsW",
"SQLTables",
"SQLTablesW",
]
for func_name in funcs_with_ret:
getattr(ODBC_API, func_name).restype = ctypes.c_short
if sys.platform != 'cli':
# IronPython does not seem to be able to declare ctypes.POINTER argument types
ODBC_API.SQLAllocHandle.argtypes = [
ctypes.c_short,
ctypes.c_void_p,
ctypes.POINTER(ctypes.c_void_p),
]
ODBC_API.SQLBindParameter.argtypes = [
ctypes.c_void_p,
ctypes.c_ushort,
ctypes.c_short,
ctypes.c_short,
ctypes.c_short,
ctypes.c_size_t,
ctypes.c_short,
ctypes.c_void_p,
ctypes.c_ssize_t,
ctypes.POINTER(ctypes.c_ssize_t),
]
ODBC_API.SQLColAttribute.argtypes = [
ctypes.c_void_p,
ctypes.c_ushort,
ctypes.c_ushort,
ctypes.c_void_p,
ctypes.c_short,
ctypes.POINTER(ctypes.c_short),
ctypes.POINTER(ctypes.c_ssize_t),
]
ODBC_API.SQLDataSources.argtypes = [
ctypes.c_void_p,
ctypes.c_ushort,
ctypes.c_char_p,
ctypes.c_short,
ctypes.POINTER(ctypes.c_short),
ctypes.c_char_p,
ctypes.c_short,
ctypes.POINTER(ctypes.c_short),
]
ODBC_API.SQLDescribeCol.argtypes = [
ctypes.c_void_p,
ctypes.c_ushort,
ctypes.c_char_p,
ctypes.c_short,
ctypes.POINTER(ctypes.c_short),
ctypes.POINTER(ctypes.c_short),
ctypes.POINTER(ctypes.c_size_t),
ctypes.POINTER(ctypes.c_short),
ctypes.POINTER(ctypes.c_short),
]
ODBC_API.SQLDescribeParam.argtypes = [
ctypes.c_void_p,
ctypes.c_ushort,
ctypes.POINTER(ctypes.c_short),
ctypes.POINTER(ctypes.c_size_t),
ctypes.POINTER(ctypes.c_short),
ctypes.POINTER(ctypes.c_short),
]
ODBC_API.SQLDriverConnect.argtypes = [
ctypes.c_void_p,
ctypes.c_void_p,
ctypes.c_char_p,
ctypes.c_short,
ctypes.c_char_p,
ctypes.c_short,
ctypes.POINTER(ctypes.c_short),
ctypes.c_ushort,
]
ODBC_API.SQLGetData.argtypes = [
ctypes.c_void_p,
ctypes.c_ushort,
ctypes.c_short,
ctypes.c_void_p,
ctypes.c_ssize_t,
ctypes.POINTER(ctypes.c_ssize_t),
]
ODBC_API.SQLGetDiagRec.argtypes = [
ctypes.c_short,
ctypes.c_void_p,
ctypes.c_short,
ctypes.c_char_p,
ctypes.POINTER(ctypes.c_int),
ctypes.c_char_p,
ctypes.c_short,
ctypes.POINTER(ctypes.c_short),
]
ODBC_API.SQLGetInfo.argtypes = [
ctypes.c_void_p,
ctypes.c_ushort,
ctypes.c_void_p,
ctypes.c_short,
ctypes.POINTER(ctypes.c_short),
]
ODBC_API.SQLRowCount.argtypes = [
ctypes.c_void_p,
ctypes.POINTER(ctypes.c_ssize_t),
]
ODBC_API.SQLNumParams.argtypes = [
ctypes.c_void_p,
ctypes.POINTER(ctypes.c_short),
]
ODBC_API.SQLNumResultCols.argtypes = [
ctypes.c_void_p,
ctypes.POINTER(ctypes.c_short),
]
ODBC_API.SQLCloseCursor.argtypes = [ctypes.c_void_p]
ODBC_API.SQLColumns.argtypes = [
ctypes.c_void_p,
ctypes.c_char_p,
ctypes.c_short,
ctypes.c_char_p,
ctypes.c_short,
ctypes.c_char_p,
ctypes.c_short,
ctypes.c_char_p,
ctypes.c_short,
]
ODBC_API.SQLConnect.argtypes = [
ctypes.c_void_p,
ctypes.c_char_p,
ctypes.c_short,
ctypes.c_char_p,
ctypes.c_short,
ctypes.c_char_p,
ctypes.c_short,
]
ODBC_API.SQLDisconnect.argtypes = [ctypes.c_void_p]
ODBC_API.SQLEndTran.argtypes = [
ctypes.c_short,
ctypes.c_void_p,
ctypes.c_short,
]
ODBC_API.SQLExecute.argtypes = [ctypes.c_void_p]
ODBC_API.SQLExecDirect.argtypes = [
ctypes.c_void_p,
ctypes.c_char_p,
ctypes.c_int,
]
ODBC_API.SQLFetch.argtypes = [ctypes.c_void_p]
ODBC_API.SQLFetchScroll.argtypes = [
ctypes.c_void_p,
ctypes.c_short,
ctypes.c_ssize_t,
]
ODBC_API.SQLForeignKeys.argtypes = [
ctypes.c_void_p,
ctypes.c_char_p,
ctypes.c_short,
ctypes.c_char_p,
ctypes.c_short,
ctypes.c_char_p,
ctypes.c_short,
ctypes.c_char_p,
ctypes.c_short,
ctypes.c_char_p,
ctypes.c_short,
ctypes.c_char_p,
ctypes.c_short,
]
ODBC_API.SQLFreeHandle.argtypes = [
ctypes.c_short,
ctypes.c_void_p,
]
ODBC_API.SQLFreeStmt.argtypes = [
ctypes.c_void_p,
ctypes.c_ushort,
]
ODBC_API.SQLGetTypeInfo.argtypes = [
ctypes.c_void_p,
ctypes.c_short,
]
ODBC_API.SQLMoreResults.argtypes = [ctypes.c_void_p]
ODBC_API.SQLPrepare.argtypes = [
ctypes.c_void_p,
ctypes.c_char_p,
ctypes.c_int,
]
ODBC_API.SQLPrimaryKeys.argtypes = [
ctypes.c_void_p,
ctypes.c_char_p,
ctypes.c_short,
ctypes.c_char_p,
ctypes.c_short,
ctypes.c_char_p,
ctypes.c_short,
]
ODBC_API.SQLProcedureColumns.argtypes = [
ctypes.c_void_p,
ctypes.c_char_p,
ctypes.c_short,
ctypes.c_char_p,
ctypes.c_short,
ctypes.c_char_p,
ctypes.c_short,
ctypes.c_char_p,
ctypes.c_short,
]
ODBC_API.SQLProcedures.argtypes = [
ctypes.c_void_p,
ctypes.c_char_p,
ctypes.c_short,
ctypes.c_char_p,
ctypes.c_short,
ctypes.c_char_p,
ctypes.c_short,
]
ODBC_API.SQLSetConnectAttr.argtypes = [
ctypes.c_void_p,
ctypes.c_int,
ctypes.c_void_p,
ctypes.c_int,
]
ODBC_API.SQLSetEnvAttr.argtypes = [
ctypes.c_void_p,
ctypes.c_int,
ctypes.c_void_p,
ctypes.c_int,
]
ODBC_API.SQLStatistics.argtypes = [
ctypes.c_void_p,
ctypes.c_char_p,
ctypes.c_short,
ctypes.c_char_p,
ctypes.c_short,
ctypes.c_char_p,
ctypes.c_short,
ctypes.c_ushort,
ctypes.c_ushort,
]
ODBC_API.SQLTables.argtypes = [
ctypes.c_void_p,
ctypes.c_char_p,
ctypes.c_short,
ctypes.c_char_p,
ctypes.c_short,
ctypes.c_char_p,
ctypes.c_short,
ctypes.c_char_p,
ctypes.c_short,
]
def to_wchar(argtypes):
if argtypes: # Under IronPython some argtypes are not declared
result = []
for x in argtypes:
if x == ctypes.c_char_p:
result.append(wchar_type)
else:
result.append(x)
return result
else:
return argtypes
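# Illustrative check (not part of the original source): to_wchar swaps narrow
# char pointers for the platform's wide-character type and leaves the rest alone.
def _demo_to_wchar():
    assert to_wchar([ctypes.c_char_p, ctypes.c_short]) == [wchar_type, ctypes.c_short]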
ODBC_API.SQLColumnsW.argtypes = to_wchar(ODBC_API.SQLColumns.argtypes)
ODBC_API.SQLConnectW.argtypes = to_wchar(ODBC_API.SQLConnect.argtypes)
ODBC_API.SQLDataSourcesW.argtypes = to_wchar(ODBC_API.SQLDataSources.argtypes)
ODBC_API.SQLDescribeColW.argtypes = to_wchar(ODBC_API.SQLDescribeCol.argtypes)
ODBC_API.SQLDriverConnectW.argtypes = to_wchar(ODBC_API.SQLDriverConnect.argtypes)
ODBC_API.SQLExecDirectW.argtypes = to_wchar(ODBC_API.SQLExecDirect.argtypes)
ODBC_API.SQLForeignKeysW.argtypes = to_wchar(ODBC_API.SQLForeignKeys.argtypes)
ODBC_API.SQLPrepareW.argtypes = to_wchar(ODBC_API.SQLPrepare.argtypes)
ODBC_API.SQLPrimaryKeysW.argtypes = to_wchar(ODBC_API.SQLPrimaryKeys.argtypes)
ODBC_API.SQLProcedureColumnsW.argtypes = to_wchar(ODBC_API.SQLProcedureColumns.argtypes)
ODBC_API.SQLProceduresW.argtypes = to_wchar(ODBC_API.SQLProcedures.argtypes)
ODBC_API.SQLStatisticsW.argtypes = to_wchar(ODBC_API.SQLStatistics.argtypes)
ODBC_API.SQLTablesW.argtypes = to_wchar(ODBC_API.SQLTables.argtypes)
# Set aliases for the ctypes functions for better code readability and performance.
ADDR = ctypes.byref
SQLFetch = ODBC_API.SQLFetch
SQLExecute = ODBC_API.SQLExecute
SQLBindParameter = ODBC_API.SQLBindParameter
def ctrl_err(ht, h, val_ret):
"""Classify type of ODBC error from (type of handle, handle, return value)
, and raise with a list"""
state = create_buffer(5)
NativeError = ctypes.c_int()
Message = create_buffer(1024*10)
Buffer_len = ctypes.c_short()
err_list = []
number_errors = 1
while 1:
ret = ODBC_API.SQLGetDiagRec(ht, h, number_errors, state, \
NativeError, Message, len(Message), ADDR(Buffer_len))
if ret == SQL_NO_DATA_FOUND:
#No more data, I can raise
if DEBUG: print err_list[0][1]
state = err_list[0][0]
err_text = '['+state+'] '+err_list[0][1]
if state[:2] in ('24','25','42'):
raise ProgrammingError(state,err_text)
elif state[:2] in ('22',):
raise DataError(state,err_text)
elif state[:2] in ('23',) or state == '40002':
raise IntegrityError(state,err_text)
elif state == '0A000':
raise NotSupportedError(state,err_text)
elif state in ('HYT00','HYT01'):
raise OperationalError(state,err_text)
elif state[:2] in ('IM','HY'):
raise Error(state,err_text)
else:
raise DatabaseError(state,err_text)
break
elif ret == SQL_INVALID_HANDLE:
#The handle passed is an invalid handle
raise ProgrammingError('', 'SQL_INVALID_HANDLE')
elif ret == SQL_SUCCESS:
err_list.append((state.value, Message.value, NativeError.value))
number_errors += 1
def validate(ret, handle_type, handle):
""" Validate return value, if not success, raise exceptions based on the handle """
if ret not in (SQL_SUCCESS, SQL_SUCCESS_WITH_INFO, SQL_NO_DATA):
ctrl_err(handle_type, handle, ret)
def AllocateEnv():
if pooling:
ret = ODBC_API.SQLSetEnvAttr(SQL_NULL_HANDLE, SQL_ATTR_CONNECTION_POOLING, SQL_CP_ONE_PER_HENV, SQL_IS_UINTEGER)
validate(ret, SQL_HANDLE_ENV, SQL_NULL_HANDLE)
'''
Allocate an ODBC environment by initializing the handle shared_env_h.
An ODBC environment has to be created before connections can be made
under it; connection pooling is shared within one environment.
'''
global shared_env_h
shared_env_h = ctypes.c_void_p()
ret = ODBC_API.SQLAllocHandle(SQL_HANDLE_ENV, SQL_NULL_HANDLE, ADDR(shared_env_h))
validate(ret, SQL_HANDLE_ENV, shared_env_h)
# Set the ODBC environment's compatibility level to ODBC 3.0
ret = ODBC_API.SQLSetEnvAttr(shared_env_h, SQL_ATTR_ODBC_VERSION, SQL_OV_ODBC3, 0)
validate(ret, SQL_HANDLE_ENV, shared_env_h)
"""
Here, we have a few callables that determine how a result row is returned.
A new one can be added by creating a callable that:
- accepts a cursor as its parameter.
- returns a callable that accepts an iterable containing the row values.
"""
def TupleRow(cursor):
"""Normal tuple with added attribute `cursor_description`, as in pyodbc.
This is the default.
"""
class Row(tuple):
cursor_description = cursor.description
return Row
def NamedTupleRow(cursor):
"""Named tuple to allow attribute lookup by name.
Requires py2.6 or above.
"""
from collections import namedtuple
attr_names = [x[0] for x in cursor._ColBufferList]
class Row(namedtuple('Row', attr_names, rename=True)):
cursor_description = cursor.description
def __new__(cls, iterable):
return super(Row, cls).__new__(cls, *iterable)
return Row
def MutableNamedTupleRow(cursor):
"""Mutable named tuple to allow attribute to be replaced. This should be
compatible with pyodbc's Row type.
Requires 3rd-party library "recordtype".
"""
from recordtype import recordtype
attr_names = [x[0] for x in cursor._ColBufferList]
class Row(recordtype('Row', attr_names, rename=True)):
cursor_description = cursor.description
def __init__(self, iterable):
super(Row, self).__init__(*iterable)
def __iter__(self):
for field_name in self.__slots__:
yield getattr(self, field_name)
def __getitem__(self, index):
if isinstance(index, slice):
return tuple(getattr(self, x) for x in self.__slots__[index])
return getattr(self, self.__slots__[index])
def __setitem__(self, index, value):
setattr(self, self.__slots__[index], value)
return Row
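# Hypothetical usage sketch: a row-type callable is passed to a Cursor (below)
# and invoked once per result set; e.g. (connection/cursor names assumed):
#   cur = Cursor(conx, row_type_callable=NamedTupleRow)
#   row = cur.execute("SELECT 1 AS n").fetchone()
#   row.n == row[0]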
# The get_type function is used to determine if parameters need to be re-bound
# against changed parameter types
def get_type(v):
t = type(v)
if isinstance(v, str):
if len(v) >= 255:
t = 's'
elif isinstance(v, unicode):
if len(v) >= 255:
t = 'u'
elif isinstance(v, Decimal):
sv = str(v).replace('-','').strip('0').split('.')
if len(sv)>1:
t = (len(sv[0])+len(sv[1]),len(sv[1]))
else:
t = (len(sv[0]),0)
return t
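# Sketch of the type keys produced above (sample values are assumptions):
def _demo_get_type():
    assert get_type(1) is int                      # small values keep their type
    assert get_type('x' * 300) == 's'              # long str -> 's'
    assert get_type(u'x' * 300) == 'u'             # long unicode -> 'u'
    assert get_type(Decimal('-12.340')) == (4, 2)  # (precision, scale)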
# The Cursor Class.
class Cursor:
def __init__(self, conx, row_type_callable=None):
""" Initialize self._stmt_h, which is the handle of a statement
A statement is actually the basis of a python"cursor" object
"""
self._stmt_h = ctypes.c_void_p()
self.connection = conx
self.row_type_callable = row_type_callable or TupleRow
self.statement = None
self._last_param_types = None
self._ParamBufferList = []
self._ColBufferList = []
self._row_type = None
self._buf_cvt_func = []
self.rowcount = -1
self.description = None
self.autocommit = None
self._ColTypeCodeList = []
self._outputsize = {}
self._inputsizers = []
self.arraysize = 1
ret = ODBC_API.SQLAllocHandle(SQL_HANDLE_STMT, self.connection.dbc_h, ADDR(self._stmt_h))
validate(ret, SQL_HANDLE_STMT, self._stmt_h)
self.closed = False
def execute(self, query_string, params=None, many_mode=False, call_mode=False):
""" Execute the query string, with optional parameters.
If parameters are provided, the query would first be prepared, then executed with parameters;
If parameters are not provided, only th query sting, it would be executed directly
"""
self._free_results('FREE_STATEMENT')
if params:
# If parameters exist, first prepare the query, then execute it with the parameters
if not type(params) in (tuple, list, set):
raise TypeError("Params must be in a list, tuple, or set")
if not many_mode:
if query_string != self.statement:
# if the query differs from the last one, it has not been prepared yet
self.prepare(query_string)
param_types = map(get_type, params)
if call_mode:
self._BindParams(param_types, self._pram_io_list)
else:
if param_types != self._last_param_types:
self._BindParams(param_types)
# With query prepared, now put parameters into buffers
col_num = 0
for param_buffer, param_buffer_len, sql_type in self._ParamBufferList:
c_char_buf, c_buf_len = '', 0
param_val = params[col_num]
if param_val is None:
c_buf_len = SQL_NULL_DATA
elif isinstance(param_val, datetime.datetime):
max_len = self.connection.type_size_dic[SQL_TYPE_TIMESTAMP][0]
datetime_str = param_val.strftime('%Y-%m-%d %H:%M:%S.%f')
c_char_buf = datetime_str[:max_len]
c_buf_len = len(c_char_buf)
# print c_buf_len, c_char_buf
elif isinstance(param_val, datetime.date):
if self.connection.type_size_dic.has_key(SQL_TYPE_DATE):
max_len = self.connection.type_size_dic[SQL_TYPE_DATE][0]
else:
max_len = 10
c_char_buf = param_val.isoformat()[:max_len]
c_buf_len = len(c_char_buf)
#print c_char_buf
elif isinstance(param_val, datetime.time):
if self.connection.type_size_dic.has_key(SQL_TYPE_TIME):
max_len = self.connection.type_size_dic[SQL_TYPE_TIME][0]
c_char_buf = param_val.isoformat()[:max_len]
c_buf_len = len(c_char_buf)
elif self.connection.type_size_dic.has_key(SQL_SS_TIME2):
max_len = self.connection.type_size_dic[SQL_SS_TIME2][0]
c_char_buf = param_val.isoformat()[:max_len]
c_buf_len = len(c_char_buf)
else:
c_buf_len = self.connection.type_size_dic[SQL_TYPE_TIMESTAMP][0]
time_str = param_val.isoformat()
if len(time_str) == 8:
time_str += '.000'
c_char_buf = '1900-01-01 '+time_str[0:c_buf_len - 11]
#print c_buf_len, c_char_buf
elif isinstance(param_val, bool):
if param_val == True:
c_char_buf = '1'
else:
c_char_buf = '0'
c_buf_len = 1
elif isinstance(param_val, (int, long, float, Decimal)):
c_char_buf = str(param_val)
c_buf_len = len(c_char_buf)
elif isinstance(param_val, str):
c_char_buf = param_val
c_buf_len = len(c_char_buf)
elif isinstance(param_val, unicode):
c_char_buf = to_unicode(param_val)
c_buf_len = len(c_char_buf)
elif isinstance(param_val, (bytearray, buffer)):
c_char_buf = str(param_val)
c_buf_len = len(c_char_buf)
else:
c_char_buf = param_val
if isinstance(param_val, (bytearray, buffer)):
param_buffer.raw = c_char_buf
else:
param_buffer.value = c_char_buf
#print param_buffer, param_buffer.value
if isinstance(param_val, (unicode, str)):
#ODBC driver will find NUL in unicode and string to determine their length
param_buffer_len.value = SQL_NTS
else:
param_buffer_len.value = c_buf_len
col_num += 1
ret = SQLExecute(self._stmt_h)
if ret != SQL_SUCCESS:
validate(ret, SQL_HANDLE_STMT, self._stmt_h)
if not many_mode:
self._NumOfRows()
self._UpdateDesc()
#self._BindCols()
else:
self.execdirect(query_string)
return (self)
def _SQLExecute(self):
ret = SQLExecute(self._stmt_h)
if ret != SQL_SUCCESS:
validate(ret, SQL_HANDLE_STMT, self._stmt_h)
def prepare(self, query_string):
"""prepare a query"""
if type(query_string) == unicode:
c_query_string = wchar_type(to_unicode(query_string))
ret = ODBC_API.SQLPrepareW(self._stmt_h, c_query_string, len(query_string))
else:
c_query_string = ctypes.c_char_p(query_string)
ret = ODBC_API.SQLPrepare(self._stmt_h, c_query_string, len(query_string))
if ret != SQL_SUCCESS:
validate(ret, SQL_HANDLE_STMT, self._stmt_h)
self.statement = query_string
def execdirect(self, query_string):
"""Execute a query directly"""
if type(query_string) == unicode:
c_query_string = wchar_type(to_unicode(query_string))
ret = ODBC_API.SQLExecDirectW(self._stmt_h, c_query_string, len(query_string))
else:
c_query_string = ctypes.c_char_p(query_string)
ret = ODBC_API.SQLExecDirect(self._stmt_h, c_query_string, len(query_string))
validate(ret, SQL_HANDLE_STMT, self._stmt_h)
self._NumOfRows()
self._UpdateDesc()
#self._BindCols()
self.statement = None
return (self)
def callproc(self, procname, args):
raise Warning('', 'Still not fully implemented')
self._pram_io_list = [row[4] for row in self.procedurecolumns(procedure = procname).fetchall() if row[4] not in (SQL_RESULT_COL, SQL_RETURN_VALUE)]
print 'pram_io_list: '+str(self._pram_io_list)
call_escape = '{CALL '+procname
if args:
call_escape += '(' + ','.join(['?' for params in args]) + ')'
call_escape += '}'
self.execute(call_escape, args, call_mode = True)
result = []
for buf, buf_len, sql_type in self._ParamBufferList:
if buf_len.value == -1:
result.append(None)
else:
result.append(self.connection.output_converter[sql_type](buf.value))
return (result)
def executemany(self, query_string, params_list = [None]):
self.prepare(query_string)
for params in params_list:
self.execute(query_string, params, many_mode = True)
self._NumOfRows()
self.rowcount = -1
self._UpdateDesc()
#self._BindCols()
def _BindParams(self, param_types, pram_io_list = []):
"""Create parameter buffers based on param types, and bind them to the statement"""
# Get the number of query parameters as determined by the database.
NumParams = ctypes.c_short()
ret = ODBC_API.SQLNumParams(self._stmt_h, ADDR(NumParams))
if ret != SQL_SUCCESS:
validate(ret, SQL_HANDLE_STMT, self._stmt_h)
if len(param_types) != NumParams.value:
# The number of parameters provided does not match the number required
error_desc = "The SQL contains %d parameter markers, but %d parameters were supplied" \
%(NumParams.value,len(param_types))
raise ProgrammingError('HY000',error_desc)
# Every parameter needs to be bound to a buffer
ParamBufferList = []
# Temporary holder since we can only call SQLDescribeParam before
# calling SQLBindParam.
temp_holder = []
for col_num in range(NumParams.value):
col_size = 0
buf_size = 512
if param_types[col_num] == type(None):
ParameterNumber = ctypes.c_ushort(col_num + 1)
DataType = ctypes.c_short()
ParameterSize = ctypes.c_size_t()
DecimalDigits = ctypes.c_short()
Nullable = ctypes.c_short()
ret = ODBC_API.SQLDescribeParam(
self._stmt_h,
ParameterNumber,
ADDR(DataType),
ADDR(ParameterSize),
ADDR(DecimalDigits),
ADDR(Nullable),
)
if ret != SQL_SUCCESS:
validate(ret, SQL_HANDLE_STMT, self._stmt_h)
sql_c_type = SQL_C_DEFAULT
sql_type = DataType.value
buf_size = 1
ParameterBuffer = create_buffer(buf_size)
elif param_types[col_num] == 'u':
sql_c_type = SQL_C_WCHAR
sql_type = SQL_WLONGVARCHAR
buf_size = len(self._inputsizers)>col_num and self._inputsizers[col_num] or 20500
ParameterBuffer = create_buffer_u(buf_size)
elif param_types[col_num] == 's':
sql_c_type = SQL_C_CHAR
sql_type = SQL_LONGVARCHAR
buf_size = len(self._inputsizers)>col_num and self._inputsizers[col_num] or 20500
ParameterBuffer = create_buffer(buf_size)
elif type(param_types[col_num]) == tuple: #Decimal
sql_c_type = SQL_C_CHAR
sql_type = SQL_NUMERIC
buf_size = param_types[col_num][0]
ParameterBuffer = create_buffer(buf_size+4)
col_size = param_types[col_num][1]
if DEBUG: print param_types[col_num][0],param_types[col_num][1]
# bool subclasses int, thus has to go first
elif issubclass(param_types[col_num], bool):
sql_c_type = SQL_C_CHAR
sql_type = SQL_BIT
buf_size = SQL_data_type_dict[sql_type][4]
ParameterBuffer = create_buffer(buf_size)
elif issubclass(param_types[col_num], int):
sql_c_type = SQL_C_CHAR
sql_type = SQL_INTEGER
buf_size = SQL_data_type_dict[sql_type][4]
ParameterBuffer = create_buffer(buf_size)
elif issubclass(param_types[col_num], long):
sql_c_type = SQL_C_CHAR
sql_type = SQL_BIGINT
buf_size = SQL_data_type_dict[sql_type][4]
ParameterBuffer = create_buffer(buf_size)
elif issubclass(param_types[col_num], float):
sql_c_type = SQL_C_CHAR
sql_type = SQL_DOUBLE
buf_size = SQL_data_type_dict[sql_type][4]
ParameterBuffer = create_buffer(buf_size)
# datetime subclasses date, thus has to go first
elif issubclass(param_types[col_num], datetime.datetime):
sql_c_type = SQL_C_CHAR
sql_type = SQL_TYPE_TIMESTAMP
buf_size = self.connection.type_size_dic[SQL_TYPE_TIMESTAMP][0]
ParameterBuffer = create_buffer(buf_size)
col_size = self.connection.type_size_dic[SQL_TYPE_TIMESTAMP][1]
elif issubclass(param_types[col_num], datetime.date):
sql_c_type = SQL_C_CHAR
if self.connection.type_size_dic.has_key(SQL_TYPE_DATE):
if DEBUG: print 'conx.type_size_dic.has_key(SQL_TYPE_DATE)'
sql_type = SQL_TYPE_DATE
buf_size = self.connection.type_size_dic[SQL_TYPE_DATE][0]
ParameterBuffer = create_buffer(buf_size)
col_size = self.connection.type_size_dic[SQL_TYPE_DATE][1]
else:
# SQL Server <2008 doesn't have a DATE type.
sql_type = SQL_TYPE_TIMESTAMP
buf_size = 10
ParameterBuffer = create_buffer(buf_size)
elif issubclass(param_types[col_num], datetime.time):
sql_c_type = SQL_C_CHAR
if self.connection.type_size_dic.has_key(SQL_TYPE_TIME):
sql_type = SQL_TYPE_TIME
buf_size = self.connection.type_size_dic[SQL_TYPE_TIME][0]
ParameterBuffer = create_buffer(buf_size)
col_size = self.connection.type_size_dic[SQL_TYPE_TIME][1]
elif self.connection.type_size_dic.has_key(SQL_SS_TIME2):
# TIME type added in SQL Server 2008
sql_type = SQL_SS_TIME2
buf_size = self.connection.type_size_dic[SQL_SS_TIME2][0]
ParameterBuffer = create_buffer(buf_size)
col_size = self.connection.type_size_dic[SQL_SS_TIME2][1]
else:
# SQL Server <2008 doesn't have a TIME type.
sql_type = SQL_TYPE_TIMESTAMP
buf_size = self.connection.type_size_dic[SQL_TYPE_TIMESTAMP][0]
ParameterBuffer = create_buffer(buf_size)
col_size = 3
elif issubclass(param_types[col_num], unicode):
sql_c_type = SQL_C_WCHAR
sql_type = SQL_WVARCHAR
buf_size = 255
ParameterBuffer = create_buffer_u(buf_size)
elif issubclass(param_types[col_num], str):
sql_c_type = SQL_C_CHAR
sql_type = SQL_VARCHAR
buf_size = 255
ParameterBuffer = create_buffer(buf_size)
elif issubclass(param_types[col_num], (bytearray, buffer)):
sql_c_type = SQL_C_BINARY
sql_type = SQL_LONGVARBINARY
buf_size = len(self._inputsizers)>col_num and self._inputsizers[col_num] or 20500
ParameterBuffer = create_buffer(buf_size)
else:
sql_c_type = SQL_C_CHAR
sql_type = SQL_LONGVARCHAR
buf_size = len(self._inputsizers)>col_num and self._inputsizers[col_num] or 20500
ParameterBuffer = create_buffer(buf_size)
temp_holder.append((sql_c_type, sql_type, buf_size, col_size, ParameterBuffer))
for col_num, (sql_c_type, sql_type, buf_size, col_size, ParameterBuffer) in enumerate(temp_holder):
BufferLen = ctypes.c_ssize_t(buf_size)
LenOrIndBuf = ctypes.c_ssize_t()
InputOutputType = SQL_PARAM_INPUT
if len(pram_io_list) > col_num:
InputOutputType = pram_io_list[col_num]
ret = SQLBindParameter(self._stmt_h, col_num + 1, InputOutputType, sql_c_type, sql_type, buf_size,\
col_size, ADDR(ParameterBuffer), BufferLen,ADDR(LenOrIndBuf))
if ret != SQL_SUCCESS:
validate(ret, SQL_HANDLE_STMT, self._stmt_h)
# Append the value buffer and the length buffer to the array
ParamBufferList.append((ParameterBuffer,LenOrIndBuf,sql_type))
self._last_param_types = param_types
self._ParamBufferList = ParamBufferList
def _CreateColBuf(self):
NOC = self._NumOfCols()
self._ColBufferList = []
self._row_type = None
for col_num in range(NOC):
col_name = self.description[col_num][0]
col_sql_data_type = self._ColTypeCodeList[col_num]
# set default size based on the column's SQL data type
total_buf_len = SQL_data_type_dict[col_sql_data_type][4]
# over-write if there's preset size value for "large columns"
if total_buf_len >= 20500:
total_buf_len = self._outputsize.get(None,total_buf_len)
# override if a preset size value exists for the "col_num" column
total_buf_len = self._outputsize.get(col_num, total_buf_len)
alloc_buffer = SQL_data_type_dict[col_sql_data_type][3](total_buf_len)
used_buf_len = ctypes.c_ssize_t()
target_type = SQL_data_type_dict[col_sql_data_type][2]
force_unicode = self.connection.unicode_results
if force_unicode and col_sql_data_type in (SQL_CHAR,SQL_VARCHAR,SQL_LONGVARCHAR):
target_type = SQL_C_WCHAR
alloc_buffer = create_buffer_u(total_buf_len)
buf_cvt_func = self.connection.output_converter[self._ColTypeCodeList[col_num]]
self._ColBufferList.append([col_name, target_type, used_buf_len, alloc_buffer, total_buf_len, buf_cvt_func])
def _GetData(self):
'''Fetch the current row's columns into the bound buffers and convert them'''
# Lazily create the row type on first fetch.
if self._row_type is None:
self._row_type = self.row_type_callable(self)
value_list = []
col_num = 0
for col_name, target_type, used_buf_len, alloc_buffer, total_buf_len, buf_cvt_func in self._ColBufferList:
blocks = []
while True:
ret = ODBC_API.SQLGetData(self._stmt_h, col_num + 1, target_type, ADDR(alloc_buffer), total_buf_len,\
ADDR(used_buf_len))
validate(ret, SQL_HANDLE_STMT, self._stmt_h)
if ret == SQL_SUCCESS:
if used_buf_len.value == SQL_NULL_DATA:
blocks.append(None)
else:
if target_type == SQL_C_BINARY:
blocks.append(alloc_buffer.raw[:used_buf_len.value])
elif target_type == SQL_C_WCHAR:
blocks.append(from_buffer_u(alloc_buffer))
else:
#print col_name, target_type, alloc_buffer.value
blocks.append(alloc_buffer.value)
break
if ret == SQL_SUCCESS_WITH_INFO:
if target_type == SQL_C_BINARY:
blocks.append(alloc_buffer.raw)
else:
blocks.append(alloc_buffer.value)
if ret == SQL_NO_DATA:
break
if len(blocks) == 1:
raw_value = blocks[0]
else:
raw_value = ''.join(blocks)
if raw_value is None:
value_list.append(None)
else:
value_list.append(buf_cvt_func(raw_value))
col_num += 1
return self._row_type(value_list)
def _UpdateDesc(self):
"Get the information of (name, type_code, display_size, internal_size, col_precision, scale, null_ok)"
Cname = create_buffer(1024)
Cname_ptr = ctypes.c_short()
Ctype_code = ctypes.c_short()
Csize = ctypes.c_size_t()
Cdisp_size = ctypes.c_ssize_t(0)
CDecimalDigits = ctypes.c_short()
Cnull_ok = ctypes.c_short()
ColDescr = []
self._ColTypeCodeList = []
NOC = self._NumOfCols()
for col in range(1, NOC+1):
ret = ODBC_API.SQLColAttribute(self._stmt_h, col, SQL_DESC_DISPLAY_SIZE, ADDR(create_buffer(10)),
10, ADDR(ctypes.c_short()),ADDR(Cdisp_size))
validate(ret, SQL_HANDLE_STMT, self._stmt_h)
ret = ODBC_API.SQLDescribeCol(self._stmt_h, col, Cname, len(Cname), ADDR(Cname_ptr),\
ADDR(Ctype_code),ADDR(Csize),ADDR(CDecimalDigits), ADDR(Cnull_ok))
validate(ret, SQL_HANDLE_STMT, self._stmt_h)
col_name = Cname.value
if lowercase:
col_name = col_name.lower()
#(name, type_code, display_size,
# internal_size, col_precision, scale, null_ok)
ColDescr.append((col_name, SQL_data_type_dict.get(Ctype_code.value, (Ctype_code.value,))[0], Cdisp_size.value,\
Csize.value, Csize.value, CDecimalDigits.value, Cnull_ok.value == 1))
self._ColTypeCodeList.append(Ctype_code.value)
if ColDescr:
self.description = ColDescr
else:
self.description = None
self._CreateColBuf()
def _NumOfRows(self):
"""Get the number of rows"""
NOR = ctypes.c_ssize_t()
ret = ODBC_API.SQLRowCount(self._stmt_h, ADDR(NOR))
validate(ret, SQL_HANDLE_STMT, self._stmt_h)
self.rowcount = NOR.value
return self.rowcount
def _NumOfCols(self):
"""Get the number of cols"""
NOC = ctypes.c_short()
ret = ODBC_API.SQLNumResultCols(self._stmt_h, ADDR(NOC))
validate(ret, SQL_HANDLE_STMT, self._stmt_h)
return NOC.value
def fetchall(self):
rows = []
while True:
row = self.fetchone()
if row is None:
break
rows.append(row)
return rows
def fetchmany(self, num = None):
if num is None:
num = self.arraysize
rows, row_num = [], 0
while row_num < num:
row = self.fetchone()
if row is None:
break
rows.append(row)
row_num += 1
return rows
def fetchone(self):
ret = SQLFetch(self._stmt_h)
if ret == SQL_SUCCESS:
return self._GetData()
elif ret == SQL_NO_DATA_FOUND:
return None
else:
validate(ret, SQL_HANDLE_STMT, self._stmt_h)
def next(self):
row = self.fetchone()
if row is None:
raise StopIteration
return row
def __iter__(self):
return self
def skip(self, count = 0):
for i in xrange(count):
ret = ODBC_API.SQLFetchScroll(self._stmt_h, SQL_FETCH_NEXT, 0)
if ret != SQL_SUCCESS:
validate(ret, SQL_HANDLE_STMT, self._stmt_h)
return None
def nextset(self):
ret = ODBC_API.SQLMoreResults(self._stmt_h)
if ret not in (SQL_SUCCESS, SQL_NO_DATA):
validate(ret, SQL_HANDLE_STMT, self._stmt_h)
if ret == SQL_NO_DATA:
self._free_results('FREE_STATEMENT')
return False
else:
self._NumOfRows()
self._UpdateDesc()
#self._BindCols()
return True
def _free_results(self, free_statement):
if not self.connection.connected:
raise ProgrammingError('HY000','Attempt to use a closed connection.')
self.description = None
if free_statement == 'FREE_STATEMENT':
ret = ODBC_API.SQLFreeStmt(self._stmt_h, SQL_CLOSE)
validate(ret, SQL_HANDLE_STMT, self._stmt_h)
else:
ret = ODBC_API.SQLFreeStmt(self._stmt_h, SQL_UNBIND)
validate(ret, SQL_HANDLE_STMT, self._stmt_h)
ret = ODBC_API.SQLFreeStmt(self._stmt_h, SQL_RESET_PARAMS)
validate(ret, SQL_HANDLE_STMT, self._stmt_h)
self.rowcount = -1
def getTypeInfo(self, sqlType = None):
if sqlType is None:
sql_type = SQL_ALL_TYPES
else:
sql_type = sqlType
ret = ODBC_API.SQLGetTypeInfo(self._stmt_h, sql_type)
if ret in (SQL_SUCCESS, SQL_SUCCESS_WITH_INFO):
self._NumOfRows()
self._UpdateDesc()
#self._BindCols()
return self.fetchone()
def tables(self, table=None, catalog=None, schema=None, tableType=None):
"""Return a list with all tables"""
l_catalog = l_schema = l_table = l_tableType = 0
if catalog is not None:
l_catalog = len(catalog)
catalog = ctypes.c_char_p(catalog)
if schema is not None:
l_schema = len(schema)
schema = ctypes.c_char_p(schema)
if table is not None:
l_table = len(table)
table = ctypes.c_char_p(table)
if tableType is not None:
l_tableType = len(tableType)
tableType = ctypes.c_char_p(tableType)
self._free_results('FREE_STATEMENT')
self.statement = None
ret = ODBC_API.SQLTables(self._stmt_h,
catalog, l_catalog,
schema, l_schema,
table, l_table,
tableType, l_tableType)
validate(ret, SQL_HANDLE_STMT, self._stmt_h)
self._NumOfRows()
self._UpdateDesc()
#self._BindCols()
return self
def columns(self, table=None, catalog=None, schema=None, column=None):
"""Return a list with all columns"""
l_catalog = l_schema = l_table = l_column = 0
if catalog is not None:
l_catalog = len(catalog)
catalog = ctypes.c_char_p(catalog)
if schema is not None:
l_schema = len(schema)
schema = ctypes.c_char_p(schema)
if table is not None:
l_table = len(table)
table = ctypes.c_char_p(table)
if column is not None:
l_column = len(column)
column = ctypes.c_char_p(column)
self._free_results('FREE_STATEMENT')
self.statement = None
ret = ODBC_API.SQLColumns(self._stmt_h,
catalog, l_catalog,
schema, l_schema,
table, l_table,
column, l_column)
validate(ret, SQL_HANDLE_STMT, self._stmt_h)
self._NumOfRows()
self._UpdateDesc()
#self._BindCols()
return self
def primaryKeys(self, table=None, catalog=None, schema=None):
l_catalog = l_schema = l_table = 0
if catalog is not None:
l_catalog = len(catalog)
catalog = ctypes.c_char_p(catalog)
if schema is not None:
l_schema = len(schema)
schema = ctypes.c_char_p(schema)
if table is not None:
l_table = len(table)
table = ctypes.c_char_p(table)
self._free_results('FREE_STATEMENT')
self.statement = None
ret = ODBC_API.SQLPrimaryKeys(self._stmt_h,
catalog, l_catalog,
schema, l_schema,
table, l_table)
validate(ret, SQL_HANDLE_STMT, self._stmt_h)
self._NumOfRows()
self._UpdateDesc()
#self._BindCols()
return self
def foreignKeys(self, table=None, catalog=None, schema=None, foreignTable=None, foreignCatalog=None, foreignSchema=None):
l_catalog = l_schema = l_table = l_foreignTable = l_foreignCatalog = l_foreignSchema = 0
if catalog is not None:
l_catalog = len(catalog)
catalog = ctypes.c_char_p(catalog)
if schema is not None:
l_schema = len(schema)
schema = ctypes.c_char_p(schema)
if table is not None:
l_table = len(table)
table = ctypes.c_char_p(table)
if foreignTable is not None:
l_foreignTable = len(foreignTable)
foreignTable = ctypes.c_char_p(foreignTable)
if foreignCatalog is not None:
l_foreignCatalog = len(foreignCatalog)
foreignCatalog = ctypes.c_char_p(foreignCatalog)
if foreignSchema is not None:
l_foreignSchema = len(foreignSchema)
foreignSchema = ctypes.c_char_p(foreignSchema)
self._free_results('FREE_STATEMENT')
self.statement = None
ret = ODBC_API.SQLForeignKeys(self._stmt_h,
catalog, l_catalog,
schema, l_schema,
table, l_table,
foreignCatalog, l_foreignCatalog,
foreignSchema, l_foreignSchema,
foreignTable, l_foreignTable)
validate(ret, SQL_HANDLE_STMT, self._stmt_h)
self._NumOfRows()
self._UpdateDesc()
#self._BindCols()
return self
def procedurecolumns(self, procedure=None, catalog=None, schema=None, column=None):
l_catalog = l_schema = l_procedure = l_column = 0
if catalog is not None:
l_catalog = len(catalog)
catalog = ctypes.c_char_p(catalog)
if schema is not None:
l_schema = len(schema)
schema = ctypes.c_char_p(schema)
if procedure is not None:
l_procedure = len(procedure)
procedure = ctypes.c_char_p(procedure)
if column is not None:
l_column = len(column)
column = ctypes.c_char_p(column)
self._free_results('FREE_STATEMENT')
self.statement = None
ret = ODBC_API.SQLProcedureColumns(self._stmt_h,
catalog, l_catalog,
schema, l_schema,
procedure, l_procedure,
column, l_column)
validate(ret, SQL_HANDLE_STMT, self._stmt_h)
self._NumOfRows()
self._UpdateDesc()
return self
def procedures(self, procedure=None, catalog=None, schema=None):
l_catalog = l_schema = l_procedure = 0
if catalog is not None:
l_catalog = len(catalog)
catalog = ctypes.c_char_p(catalog)
if schema is not None:
l_schema = len(schema)
schema = ctypes.c_char_p(schema)
if procedure is not None:
l_procedure = len(procedure)
procedure = ctypes.c_char_p(procedure)
self._free_results('FREE_STATEMENT')
self.statement = None
ret = ODBC_API.SQLProcedures(self._stmt_h,
catalog, l_catalog,
schema, l_schema,
procedure, l_procedure)
validate(ret, SQL_HANDLE_STMT, self._stmt_h)
self._NumOfRows()
self._UpdateDesc()
return self
def statistics(self, table, catalog=None, schema=None, unique=False, quick=True):
l_table = l_catalog = l_schema = 0
if catalog is not None:
l_catalog = len(catalog)
catalog = ctypes.c_char_p(catalog)
if schema is not None:
l_schema = len(schema)
schema = ctypes.c_char_p(schema)
if table is not None:
l_table = len(table)
table = ctypes.c_char_p(table)
if unique:
Unique = SQL_INDEX_UNIQUE
else:
Unique = SQL_INDEX_ALL
if quick:
Reserved = SQL_QUICK
else:
Reserved = SQL_ENSURE
self._free_results('FREE_STATEMENT')
self.statement = None
ret = ODBC_API.SQLStatistics(self._stmt_h,
catalog, l_catalog,
schema, l_schema,
table, l_table,
Unique, Reserved)
validate(ret, SQL_HANDLE_STMT, self._stmt_h)
self._NumOfRows()
self._UpdateDesc()
#self._BindCols()
return self
def commit(self):
self.connection.commit()
def rollback(self):
self.connection.rollback()
def setoutputsize(self, size, column = None):
self._outputsize[column] = size
def setinputsizes(self, sizes):
self._inputsizers = [size for size in sizes]
def close(self):
""" Call SQLCloseCursor API to free the statement handle"""
# ret = ODBC_API.SQLCloseCursor(self._stmt_h)
# validate(ret, SQL_HANDLE_STMT, self._stmt_h)
#
ret = ODBC_API.SQLFreeStmt(self._stmt_h, SQL_CLOSE)
validate(ret, SQL_HANDLE_STMT, self._stmt_h)
ret = ODBC_API.SQLFreeStmt(self._stmt_h, SQL_UNBIND)
validate(ret, SQL_HANDLE_STMT, self._stmt_h)
ret = ODBC_API.SQLFreeStmt(self._stmt_h, SQL_RESET_PARAMS)
validate(ret, SQL_HANDLE_STMT, self._stmt_h)
ret = ODBC_API.SQLFreeHandle(SQL_HANDLE_STMT, self._stmt_h)
validate(ret, SQL_HANDLE_STMT, self._stmt_h)
self.closed = True
def __del__(self):
if not self.closed:
if DEBUG: print 'auto closing cursor: ',
try:
self.close()
except:
if DEBUG: print 'failed'
else:
if DEBUG: print 'succeeded'
def __exit__(self, type, value, traceback):
if value:
self.rollback()
else:
self.commit()
self.close()
def __enter__(self):
return self
# This class implements an ODBC connection.
#
#
class Connection:
def __init__(self, connectString = '', autocommit = False, ansi = False, timeout = 0, unicode_results = False, readonly = False, **kargs):
"""Init variables and connect to the engine"""
self.connected = 0
self.type_size_dic = {}
self.unicode_results = False
self.dbc_h = ctypes.c_void_p()
self.autocommit = autocommit
self.readonly = False
self.timeout = 0
for key, value in kargs.items():
connectString = connectString + key + '=' + value + ';'
self.connectString = connectString
self.clear_output_converters()
try:
lock.acquire()
if shared_env_h is None:
# Initialize an environment if one has not been created yet.
AllocateEnv()
finally:
lock.release()
# Allocate a DBC handle self.dbc_h under the environment shared_env_h.
# This DBC handle is actually the basis of a "connection".
# The handle self.dbc_h will be used to connect to a data source
# in the self.connect and self.ConnectByDSN methods.
ret = ODBC_API.SQLAllocHandle(SQL_HANDLE_DBC, shared_env_h, ADDR(self.dbc_h))
validate(ret, SQL_HANDLE_DBC, self.dbc_h)
self.connect(connectString, autocommit, ansi, timeout, unicode_results, readonly)
def connect(self, connectString = '', autocommit = False, ansi = False, timeout = 0, unicode_results = False, readonly = False):
"""Connect to odbc, using connect strings and set the connection's attributes like autocommit and timeout
by calling SQLSetConnectAttr
"""
# Before establishing the connection via the connection string,
# set the connection's login timeout (SQL_ATTR_LOGIN_TIMEOUT).
if timeout != 0:
ret = ODBC_API.SQLSetConnectAttr(self.dbc_h, SQL_ATTR_LOGIN_TIMEOUT, timeout, SQL_IS_UINTEGER)
validate(ret, SQL_HANDLE_DBC, self.dbc_h)
# Create a connection from the connect string by calling SQLDriverConnect
# and make self.dbc_h the handle of this connection.
# Convert the connect string to an encoded string
# so it can be converted to a ctypes c_char array object.
if not ansi:
c_connectString = wchar_type(to_unicode(self.connectString))
odbc_func = ODBC_API.SQLDriverConnectW
else:
c_connectString = ctypes.c_char_p(self.connectString)
odbc_func = ODBC_API.SQLDriverConnect
# With unixODBC, SQLDriverConnect will intermittently fail with error:
# [01000] [unixODBC][Driver Manager]Can't open lib '/path/to/so' : file not found"
# or:
# [01000] [unixODBC][Driver Manager]Can't open lib '/path/to/so' : (null)"
# when called concurrently by more than one thread. So we have to
# use a lock to serialize the calls. Incidentally, the error is much
# less likely to happen if ODBC tracing is enabled, likely due to the
# implicit serialization caused by writing to the trace file.
if ODBC_API._name != 'odbc32':
try:
lock.acquire()
ret = odbc_func(self.dbc_h, 0, c_connectString, len(self.connectString), None, 0, None, SQL_DRIVER_NOPROMPT)
finally:
lock.release()
else:
ret = odbc_func(self.dbc_h, 0, c_connectString, len(self.connectString), None, 0, None, SQL_DRIVER_NOPROMPT)
validate(ret, SQL_HANDLE_DBC, self.dbc_h)
# Set the connection's attribute of "autocommit"
#
self.autocommit = autocommit
if self.autocommit:
ret = ODBC_API.SQLSetConnectAttr(self.dbc_h, SQL_ATTR_AUTOCOMMIT, SQL_AUTOCOMMIT_ON, SQL_IS_UINTEGER)
else:
ret = ODBC_API.SQLSetConnectAttr(self.dbc_h, SQL_ATTR_AUTOCOMMIT, SQL_AUTOCOMMIT_OFF, SQL_IS_UINTEGER)
validate(ret, SQL_HANDLE_DBC, self.dbc_h)
# Set the connection's attribute of "readonly"
#
self.readonly = readonly
ret = ODBC_API.SQLSetConnectAttr(self.dbc_h, SQL_ATTR_ACCESS_MODE, SQL_MODE_READ_ONLY if self.readonly else SQL_MODE_READ_WRITE, SQL_IS_UINTEGER)
validate(ret, SQL_HANDLE_DBC, self.dbc_h)
self.unicode_results = unicode_results
self.update_type_size_info()
self.connected = 1
def clear_output_converters(self):
self.output_converter = {}
for sqltype, profile in SQL_data_type_dict.items():
self.output_converter[sqltype] = profile[1]
def add_output_converter(self, sqltype, func):
self.output_converter[sqltype] = func
def settimeout(self, timeout):
ret = ODBC_API.SQLSetConnectAttr(self.dbc_h, SQL_ATTR_CONNECTION_TIMEOUT, timeout, SQL_IS_UINTEGER)
validate(ret, SQL_HANDLE_DBC, self.dbc_h)
self.timeout = timeout
def ConnectByDSN(self, dsn, user, passwd = ''):
"""Connect to odbc, we need dsn, user and optionally password"""
self.dsn = dsn
self.user = user
self.passwd = passwd
sn = create_buffer(dsn)
un = create_buffer(user)
pw = create_buffer(passwd)
ret = ODBC_API.SQLConnect(self.dbc_h, sn, len(sn), un, len(un), pw, len(pw))
validate(ret, SQL_HANDLE_DBC, self.dbc_h)
self.update_type_size_info()
self.connected = 1
def cursor(self, row_type_callable=None):
#self.settimeout(self.timeout)
if not self.connected:
raise ProgrammingError('HY000','Attempt to use a closed connection.')
return Cursor(self, row_type_callable=row_type_callable)
def update_type_size_info(self):
for sql_type in (
SQL_TYPE_TIMESTAMP,
SQL_TYPE_DATE,
SQL_TYPE_TIME,
SQL_SS_TIME2,
):
cur = Cursor(self)
info_tuple = cur.getTypeInfo(sql_type)
if info_tuple is not None:
self.type_size_dic[sql_type] = info_tuple[2], info_tuple[14]
cur.close()
def commit(self):
if not self.connected:
raise ProgrammingError('HY000','Attempt to use a closed connection.')
ret = ODBC_API.SQLEndTran(SQL_HANDLE_DBC, self.dbc_h, SQL_COMMIT)
validate(ret, SQL_HANDLE_DBC, self.dbc_h)
def rollback(self):
if not self.connected:
raise ProgrammingError('HY000','Attempt to use a closed connection.')
ret = ODBC_API.SQLEndTran(SQL_HANDLE_DBC, self.dbc_h, SQL_ROLLBACK)
validate(ret, SQL_HANDLE_DBC, self.dbc_h)
def getinfo(self,infotype):
if infotype not in aInfoTypes:
raise ProgrammingError('HY000','Invalid getinfo value: '+str(infotype))
if aInfoTypes[infotype] == 'GI_UINTEGER':
total_buf_len = 1000
alloc_buffer = ctypes.c_ulong()
used_buf_len = ctypes.c_short()
ret = ODBC_API.SQLGetInfo(self.dbc_h,infotype,ADDR(alloc_buffer), total_buf_len,\
ADDR(used_buf_len))
validate(ret, SQL_HANDLE_DBC, self.dbc_h)
result = alloc_buffer.value
elif aInfoTypes[infotype] == 'GI_USMALLINT':
total_buf_len = 1000
alloc_buffer = ctypes.c_ushort()
used_buf_len = ctypes.c_short()
ret = ODBC_API.SQLGetInfo(self.dbc_h,infotype,ADDR(alloc_buffer), total_buf_len,\
ADDR(used_buf_len))
validate(ret, SQL_HANDLE_DBC, self.dbc_h)
result = alloc_buffer.value
else:
total_buf_len = 1000
alloc_buffer = create_buffer(total_buf_len)
used_buf_len = ctypes.c_short()
ret = ODBC_API.SQLGetInfo(self.dbc_h,infotype,ADDR(alloc_buffer), total_buf_len,\
ADDR(used_buf_len))
validate(ret, SQL_HANDLE_DBC, self.dbc_h)
result = alloc_buffer.value
if aInfoTypes[infotype] == 'GI_YESNO':
result = (result[0] == 'Y')
return result
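# Usage sketch (illustrative, commented out): 17 is the standard ODBC
# info-type code SQL_DBMS_NAME; whether that code is present in this
# module's aInfoTypes table is an assumption here.
# print connection.getinfo(17)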
def __exit__(self, type, value, traceback):
if value:
self.rollback()
else:
self.commit()
if self.connected:
self.close()
def __enter__(self):
return self
def __del__(self):
if self.connected:
self.close()
def close(self):
if not self.connected:
raise ProgrammingError('HY000','Attempt to close a closed connection.')
if DEBUG: print 'disconnect'
if not self.autocommit:
self.rollback()
ret = ODBC_API.SQLDisconnect(self.dbc_h)
validate(ret, SQL_HANDLE_DBC, self.dbc_h)
if DEBUG: print 'free dbc'
ret = ODBC_API.SQLFreeHandle(SQL_HANDLE_DBC, self.dbc_h)
validate(ret, SQL_HANDLE_DBC, self.dbc_h)
# if shared_env_h.value:
# if DEBUG: print 'env'
# ret = ODBC_API.SQLFreeHandle(SQL_HANDLE_ENV, shared_env_h)
# validate(ret, SQL_HANDLE_ENV, shared_env_h)
self.connected = 0
odbc = Connection
connect = odbc
'''
def connect(connectString = '', autocommit = False, ansi = False, timeout = 0, unicode_results = False, readonly = False, **kargs):
return odbc(connectString, autocommit, ansi, timeout, unicode_results, readonly, kargs)
'''
def win_create_mdb(mdb_path, sort_order = "General\0\0"):
if sys.platform not in ('win32','cli'):
raise Exception('This function is available for use in Windows only.')
#CREATE_DB=<path name> <sort order>
ctypes.windll.ODBCCP32.SQLConfigDataSource.argtypes = [ctypes.c_void_p,ctypes.c_ushort,ctypes.c_char_p,ctypes.c_char_p]
c_Path = "CREATE_DB=" + mdb_path + " " + sort_order
ODBC_ADD_SYS_DSN = 1
ret = ctypes.windll.ODBCCP32.SQLConfigDataSource(None,ODBC_ADD_SYS_DSN,"Microsoft Access Driver (*.mdb)", c_Path)
if not ret:
raise Exception('Failed to create Access mdb file. Please check file path, permission and Access driver readiness.')
def win_compact_mdb(mdb_path, compacted_mdb_path, sort_order = "General\0\0"):
if sys.platform not in ('win32','cli'):
raise Exception('This function is available for use in Windows only.')
#COMPACT_DB=<source path> <destination path> <sort order>
c_Path = "COMPACT_DB=" + mdb_path + " " + compacted_mdb_path + " " + sort_order
ODBC_ADD_SYS_DSN = 1
ctypes.windll.ODBCCP32.SQLConfigDataSource.argtypes = [ctypes.c_void_p,ctypes.c_ushort,ctypes.c_char_p,ctypes.c_char_p]
ret = ctypes.windll.ODBCCP32.SQLConfigDataSource(None,ODBC_ADD_SYS_DSN,"Microsoft Access Driver (*.mdb)", c_Path)
if not ret:
raise Exception('Failed to compact Access mdb file. Please check file path, permission and Access driver readiness.')
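# Usage sketch for the two helpers above (Windows only; the file
# paths below are hypothetical examples):
# win_create_mdb(r'C:\temp\new.mdb')
# win_compact_mdb(r'C:\temp\new.mdb', r'C:\temp\compacted.mdb')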
def dataSources():
"""Return a list with [name, descrition]"""
dsn = create_buffer(1024)
desc = create_buffer(1024)
dsn_len = ctypes.c_short()
desc_len = ctypes.c_short()
dsn_list = {}
try:
lock.acquire()
if shared_env_h is None:
AllocateEnv()
finally:
lock.release()
while True:
ret = ODBC_API.SQLDataSources(shared_env_h, SQL_FETCH_NEXT, \
dsn, len(dsn), ADDR(dsn_len), desc, len(desc), ADDR(desc_len))
if ret == SQL_NO_DATA_FOUND:
break
elif ret not in (SQL_SUCCESS, SQL_SUCCESS_WITH_INFO):
ctrl_err(SQL_HANDLE_ENV, shared_env_h, ret)
else:
dsn_list[dsn.value] = desc.value
return dsn_list
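# Usage sketch (illustrative, commented out so the module stays
# import-safe). The DSN name and credentials are made-up examples; any
# installed ODBC driver or DSN will do, and error handling is omitted:
#
# conn = connect('DSN=mydsn;UID=user;PWD=secret;')
# cur = conn.cursor()
# cur.tables() # catalog query; the cursor now holds the result set
# for row in cur.fetchall():
#     print row
# cur.close()
# conn.close()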
| Python |
#!/usr/bin/env python
# -*- coding: utf8 -*-
# Plural-Forms for zh (Chinese)
nplurals=1 # Chinese language has ONE form!
# Always returns 0:
get_plural_id = lambda n: 0
# Construct and return the plural form of *word* using
# *plural_id* (which is always > 0). This function is executed
# for words (or phrases) not found in the plural_dict dictionary
# construct_plural_form = lambda word, plural_id: word
| Python |
#!/usr/bin/env python
# -*- coding: utf8 -*-
# Plural-Forms for it (Italian)
nplurals=2 # Italian language has 2 forms:
# 1 singular and 1 plural
# Determine the plural_id for a number *n* as a sequence of
# non-negative integers: 0,1,...
# NOTE! For the singular form ALWAYS return plural_id = 0
get_plural_id = lambda n: int(n != 1)
# Construct and return the plural form of *word* using
# *plural_id* (which is always > 0). This function is executed
# for words (or phrases) not found in the plural_dict dictionary
# construct_plural_form = lambda word, plural_id: (word + 'suffix')
| Python |
#!/usr/bin/env python
# -*- coding: utf8 -*-
# Plural-Forms for es (Spanish)
nplurals=2 # Spanish language has 2 forms:
# 1 singular and 1 plural
# Determine the plural_id for a number *n* as a sequence of
# non-negative integers: 0,1,...
# NOTE! For the singular form ALWAYS return plural_id = 0
get_plural_id = lambda n: int(n != 1)
# Construct and return the plural form of *word* using
# *plural_id* (which is always > 0). This function is executed
# for words (or phrases) not found in the plural_dict dictionary
# construct_plural_form = lambda word, plural_id: (word + 'suffix')
| Python |
#!/usr/bin/env python
# -*- coding: utf8 -*-
# Plural-Forms for ja (Japanese)
nplurals=1 # Japanese language has ONE form!
# Always returns 0:
get_plural_id = lambda n: 0
# Construct and return the plural form of *word* using
# *plural_id* (which is always > 0). This function is executed
# for words (or phrases) not found in the plural_dict dictionary
# construct_plural_form = lambda word, plural_id: word
| Python |
#!/usr/bin/env python
# -*- coding: utf8 -*-
# Plural-Forms for cs (Czech)
nplurals=3 # Czech language has 3 forms:
# 1 singular and 2 plurals
# Determine the plural_id for a number *n* as a sequence of
# non-negative integers: 0,1,...
# NOTE! For the singular form ALWAYS return plural_id = 0
get_plural_id = lambda n: ( 0 if n==1 else
1 if 2<=n<=4 else
2 )
# Construct and return the plural form of *word* using
# *plural_id* (which is always > 0). This function is executed
# for words (or phrases) not found in the plural_dict dictionary
# construct_plural_form = lambda word, plural_id: (word + 'suffix')
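# Illustrative check (values follow directly from the lambda above):
# get_plural_id(1) == 0; get_plural_id(3) == 1; get_plural_id(5) == 2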
| Python |
#!/usr/bin/env python
# -*- coding: utf8 -*-
# Plural-Forms for id (Indonesian)
nplurals=2 # Indonesian has 2 forms:
# 1 singular and 1 plural
# Determine the plural_id for a number *n* as a sequence of
# non-negative integers: 0,1,...
# NOTE! For the singular form ALWAYS return plural_id = 0
get_plural_id = lambda n: int(n != 1)
# Construct and return the plural form of *word* using
# *plural_id* (which is always > 0). This function is executed
# for words (or phrases) not found in the plural_dict dictionary
# construct_plural_form = lambda word, plural_id: (word + 'suffix')
| Python |
#!/usr/bin/env python
# -*- coding: utf8 -*-
# Plural-Forms for af (Afrikaans (South Africa))
nplurals=2 # Afrikaans language has 2 forms:
# 1 singular and 1 plural
# Determine the plural_id for a number *n* as a sequence of
# non-negative integers: 0,1,...
# NOTE! For the singular form ALWAYS return plural_id = 0
get_plural_id = lambda n: int(n != 1)
# Construct and return the plural form of *word* using
# *plural_id* (which is always > 0). This function is executed
# for words (or phrases) not found in the plural_dict dictionary
# construct_plural_form = lambda word, plural_id: (word + 'suffix')
| Python |
#!/usr/bin/env python
# -*- coding: utf8 -*-
# Plural-Forms for sk (Slovak (Slovakia))
nplurals=3 # Slovak language has 3 forms:
# 1 singular and 2 plurals
# Determine the plural_id for a number *n* as a sequence of
# non-negative integers: 0,1,...
# NOTE! For the singular form ALWAYS return plural_id = 0
get_plural_id = lambda n: (0 if n % 10 == 1 and n % 100 != 11 else
1 if n % 10 >= 2 and n % 10 <= 4 and
(n % 100 < 10 or n % 100 >= 20) else
2)
# construct_plural_form() is not used here because of the complex
# rules of the Slovak language. The default version of this function
# simply inserts new words into the plural_dict dictionary.
# construct_plural_form = lambda word, plural_id: word
| Python |
#!/usr/bin/env python
# -*- coding: utf8 -*-
# Plural-Forms for uk (Ukrainian)
nplurals=3 # Ukrainian language has 3 forms:
# 1 singular and 2 plurals
# Determine the plural_id for a number *n* as a sequence of
# non-negative integers: 0,1,...
# NOTE! For the singular form ALWAYS return plural_id = 0
get_plural_id = lambda n: (0 if n % 10 == 1 and n % 100 != 11 else
1 if n % 10 >= 2 and n % 10 <= 4 and
(n % 100 < 10 or n % 100 >= 20) else
2)
# construct_plural_form() is not used here because of the complex
# rules of the Ukrainian language. The default version of this
# function simply inserts new words into the plural_dict dictionary.
# construct_plural_form = lambda word, plural_id: word
| Python |
#!/usr/bin/env python
# -*- coding: utf8 -*-
# Plural-Forms for pl (Polish)
nplurals=3 # Polish language has 3 forms:
# 1 singular and 2 plurals
# Determine the plural_id for a number *n* as a sequence of
# non-negative integers: 0,1,...
# NOTE! For the singular form ALWAYS return plural_id = 0
get_plural_id = lambda n: (0 if n==1 else
1 if 2<=n<=4 else
2)
# Construct and return the plural form of *word* using
# *plural_id* (which is always > 0). This function is executed
# for words (or phrases) not found in the plural_dict dictionary
# construct_plural_form = lambda word, plural_id: (word + 'suffix')
| Python |
#!/usr/bin/env python
# -*- coding: utf8 -*-
# Plural-Forms for ru (Russian)
nplurals=3 # Russian language has 3 forms:
# 1 singular and 2 plurals
# Determine the plural_id for a number *n* as a sequence of
# non-negative integers: 0,1,...
# NOTE! For the singular form ALWAYS return plural_id = 0
get_plural_id = lambda n: (0 if n % 10 == 1 and n % 100 != 11 else
1 if n % 10 >= 2 and n % 10 <= 4 and
(n % 100 < 10 or n % 100 >= 20) else
2)
# construct_plural_form() is not used here because of the complex
# rules of the Russian language. The default version of this
# function simply inserts new words into the plural_dict dictionary.
# construct_plural_form = lambda word, plural_id: word
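# Illustrative check (values follow directly from the lambda above):
# get_plural_id(1) == 0; get_plural_id(21) == 0
# get_plural_id(3) == 1; get_plural_id(22) == 1
# get_plural_id(5) == 2; get_plural_id(11) == 2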
| Python |
#!/usr/bin/env python
# -*- coding: utf8 -*-
# Plural-Forms for de (Deutsch)
nplurals=2 # German language has 2 forms:
# 1 singular and 1 plural
# Determine the plural_id for a number *n* as a sequence of
# non-negative integers: 0,1,...
# NOTE! For the singular form ALWAYS return plural_id = 0
get_plural_id = lambda n: int(n != 1)
# Construct and return the plural form of *word* using
# *plural_id* (which is always > 0). This function is executed
# for words (or phrases) not found in the plural_dict dictionary
# construct_plural_form = lambda word, plural_id: (word + 'suffix')
| Python |
#!/usr/bin/env python
# -*- coding: utf8 -*-
# Plural-Forms for bg (Bulgarian)
nplurals=2 # Bulgarian language has 2 forms:
# 1 singular and 1 plural
# Determine the plural_id for a number *n* as a sequence of
# non-negative integers: 0,1,...
# NOTE! For the singular form ALWAYS return plural_id = 0
get_plural_id = lambda n: int(n != 1)
# Construct and return the plural form of *word* using
# *plural_id* (which is always > 0). This function is executed
# for words (or phrases) not found in the plural_dict dictionary
# construct_plural_form = lambda word, plural_id: (word + 'suffix')
| Python |
#!/usr/bin/env python
# -*- coding: utf8 -*-
# Plural-Forms for pt (Portuguese)
nplurals=2 # Portuguese has 2 forms:
# 1 singular and 1 plural
# Determine the plural_id for a number *n* as a sequence of
# non-negative integers: 0,1,...
# NOTE! For the singular form ALWAYS return plural_id = 0
get_plural_id = lambda n: int(n != 1)
# Construct and return the plural form of *word* using
# *plural_id* (which is always > 0). This function is executed
# for words (or phrases) not found in the plural_dict dictionary
# construct_plural_form = lambda word, plural_id: (word + 'suffix')
| Python |
#!/usr/bin/env python
# -*- coding: utf8 -*-
# Plural-Forms for en (English)
nplurals=2 # English language has 2 forms:
# 1 singular and 1 plural
# Determine the plural_id for a number *n* as a sequence of
# non-negative integers: 0,1,...
# NOTE! For the singular form ALWAYS return plural_id = 0
get_plural_id = lambda n: int(n != 1)
# Construct and return the plural form of *word* using
# *plural_id* (which is always > 0). This function is executed
# for words (or phrases) not found in the plural_dict dictionary
construct_plural_form = lambda word, plural_id: (word +
('es' if word[-1:] in ('s','x','o') or
word[-2:] in ('sh','ch')
else 's'))
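# Illustrative check (values follow directly from the lambda above):
# construct_plural_form('book', 1) == 'books'
# construct_plural_form('box', 1) == 'boxes'
# construct_plural_form('dish', 1) == 'dishes'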
| Python |
#!/usr/bin/env python
# -*- coding: utf8 -*-
# Plural-Forms for sl (Slovenian)
nplurals=4 # Slovenian language has 4 forms:
# 1 singular and 3 plurals
# Determine the plural_id for a number *n* as a sequence of
# non-negative integers: 0,1,...
# NOTE! For the singular form ALWAYS return plural_id = 0
get_plural_id = lambda n: (0 if n % 100 == 1 else
1 if n % 100 == 2 else
2 if n % 100 in (3,4) else
3)
# Construct and return the plural form of *word* using
# *plural_id* (which is always > 0). This function is executed
# for words (or phrases) not found in the plural_dict dictionary
# construct_plural_form = lambda word, plural_id: (word + 'suffix')
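# Illustrative check (values follow directly from the lambda above):
# get_plural_id(1) == 0; get_plural_id(101) == 0
# get_plural_id(2) == 1; get_plural_id(3) == 2; get_plural_id(4) == 2
# get_plural_id(5) == 3; get_plural_id(0) == 3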
| Python |
#!/usr/bin/env python
# -*- coding: utf8 -*-
# Plural-Forms for hi (Hindi)
nplurals=2 # Hindi has 2 forms:
# 1 singular and 1 plural
# Determine the plural_id for a number *n* as a sequence of
# non-negative integers: 0,1,...
# NOTE! For the singular form ALWAYS return plural_id = 0
get_plural_id = lambda n: int(n != 1)
# Construct and return the plural form of *word* using
# *plural_id* (which is always > 0). This function is executed
# for words (or phrases) not found in the plural_dict dictionary
# construct_plural_form = lambda word, plural_id: (word + 'suffix')
| Python |
#!/usr/bin/env python
# -*- coding: utf8 -*-
# Plural-Forms for hu (Hungarian)
nplurals=2 # Hungarian language has 2 forms:
# 1 singular and 1 plural
# Determine the plural_id for a number *n* as a sequence of
# non-negative integers: 0,1,...
# NOTE! For the singular form ALWAYS return plural_id = 0
get_plural_id = lambda n: int(n != 1)
# Construct and return the plural form of *word* using
# *plural_id* (which is always > 0). This function is executed
# for words (or phrases) not found in the plural_dict dictionary
# construct_plural_form = lambda word, plural_id: (word + 'suffix')
| Python |
#!/usr/bin/env python
# -*- coding: utf8 -*-
# Plural-Forms for he (Hebrew)
nplurals=2 # Hebrew language has 2 forms:
# 1 singular and 1 plural
# Determine the plural_id for a number *n* as a sequence of
# non-negative integers: 0,1,...
# NOTE! For the singular form ALWAYS return plural_id = 0
get_plural_id = lambda n: int(n != 1)
# Construct and return the plural form of *word* using
# *plural_id* (which is always > 0). This function is executed
# for words (or phrases) not found in the plural_dict dictionary
# construct_plural_form = lambda word, plural_id: (word + 'suffix')
| Python |
#!/usr/bin/env python
# -*- coding: utf8 -*-
# Plural-Forms for fr (French)
nplurals=2 # French language has 2 forms:
# 1 singular and 1 plural
# Determine the plural_id for a number *n* as a sequence of
# non-negative integers: 0,1,...
# NOTE! For the singular form ALWAYS return plural_id = 0
get_plural_id = lambda n: int(n != 1)
# Construct and return the plural form of *word* using
# *plural_id* (which is always > 0). This function is executed
# for words (or phrases) not found in the plural_dict dictionary
# construct_plural_form = lambda word, plural_id: (word + 'suffix')
| Python |
#!/usr/bin/env python
# -*- coding: utf8 -*-
# Plural-Forms for ro (Romanian)
nplurals=2 # Romanian has 2 forms:
# 1 singular and 1 plural
# Determine the plural_id for a number *n* as a sequence of
# non-negative integers: 0,1,...
# NOTE! For the singular form ALWAYS return plural_id = 0
get_plural_id = lambda n: int(n != 1)
# Construct and return the plural form of *word* using
# *plural_id* (which is always > 0). This function is executed
# for words (or phrases) not found in the plural_dict dictionary
# construct_plural_form = lambda word, plural_id: (word + 'suffix')
| Python |