code stringlengths 1 1.72M | language stringclasses 1 value |
|---|---|
#!/usr/bin/env python
"""Communicating sequential processes, in Python.
When using CSP Python as a DSL, this module will normally be imported
via the statement 'from csp.csp import *' and should not be imported directly.
Copyright (C) Sarah Mount, 2009-10.
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
"""
__author__ = 'Sarah Mount <s.mount@wlv.ac.uk>'
__date__ = '2010-05-16'
#DEBUG = True
DEBUG = False
from functools import wraps # Easy decorators
import copy
import gc
import inspect
import logging
import os
import random
import sys
import tempfile
import threading
import time
import uuid
try:
import cPickle as pickle # Faster, only in Python 2.x
except ImportError:
import pickle
try: # Python optimisation compiler
import psyco
psyco.full()
except ImportError:
print ( 'No available optimisation' )
CSP_IMPLEMENTATION = 'os_thread'
### Names exported by this module
__all__ = ['set_debug', 'CSPProcess', 'CSPServer', 'Alt',
'Par', 'Seq', 'Guard', 'Channel', 'FileChannel',
'process', 'forever', 'Skip', '_CSPTYPES', 'CSP_IMPLEMENTATION']
### Seeded random number generator (16 bytes)
_RANGEN = random.Random(os.urandom(16))
### CONSTANTS
_BUFFSIZE = 1024
_debug = logging.debug
class CorruptedData(Exception):
    """Raised when received data fails its authentication check.

    Used to verify that data has come from an honest source.
    """

    _MESSAGE = 'Data sent with incorrect authentication key.'

    def __str__(self):
        return self._MESSAGE
class NoGuardInAlt(Exception):
    """Raised when an Alt is asked to select with no guards available.
    """

    _MESSAGE = 'Every Alt must have at least one guard.'

    def __str__(self):
        return self._MESSAGE
### Special constants / exceptions for termination and mobility
### Better not to use classes/objects here or pickle will get confused
### by the way that csp.__init__ manages the namespace.
# Sentinel value written to a channel to request termination of the
# processes attached to it.  A plain string (rather than a class or
# object) is used because, per the comments above, pickle gets confused
# by the way csp.__init__ manages the namespace.
_POISON = ';;;__POISON__;;;'
"""Used as special data sent down a channel to invoke termination."""
class ChannelPoison(Exception):
    """Used to poison a process and propagate to all known channels.

    Raised when a process touches a poisoned channel; the handler then
    poisons every channel reachable from that process so termination
    spreads through the process network.
    """

    def __str__(self):
        # BUG FIX: message previously misspelled as 'Posioned'.
        return 'Poisoned channel exception.'
### DEBUGGING
def set_debug(status):
    """Turn module-level debugging on or off.

    As a side effect, the root logger is configured to emit every log
    record to stdout.
    """
    global DEBUG
    DEBUG = status
    logging.basicConfig(level=logging.NOTSET, stream=sys.stdout)
    logging.info("Using threading version of python-csp.")
### Fundamental CSP concepts -- Processes, Channels, Guards
class _CSPOpMixin(object):
    """Mixin class used for operator overloading in CSP process types.

    Provides guarded start/join/terminate wrappers around
    threading.Thread, poison propagation (referent_visitor) and the
    ``>`` (Seq) and ``*`` (sequential repetition) operators.

    NOTE(review): ``_Thread__started`` and ``_Thread__stop`` are the
    name-mangled private attributes of threading.Thread as implemented
    in Python 2.x; they do not exist under those names in Python 3 --
    confirm the target interpreter before reuse.
    """

    def __init__(self):
        return

    def spawn(self):
        """Start only if self is not running."""
        if not self._Thread__started.is_set():
            threading.Thread.start(self)

    def start(self):
        """Start only if self is not running.

        Unlike spawn(), this blocks until the thread has finished.
        """
        if not self._Thread__started.is_set():
            try:
                threading.Thread.start(self)
                threading.Thread.join(self)
            except KeyboardInterrupt:
                sys.exit()

    def join(self):
        """Join only if self is running."""
        if self._Thread__started.is_set():
            threading.Thread.join(self)

    def referent_visitor(self, referents):
        """Recursively walk ``referents``, poisoning every Channel found.

        Containers are recursed into (strings excluded, so they are not
        treated as sequences of characters); CSPProcess objects
        contribute their args/kwargs; any other object contributes its
        ``__dict__`` values.
        """
        for obj in referents:
            if obj is self or obj is None:
                continue
            if isinstance(obj, Channel):
                obj.poison()
            elif ((hasattr(obj, '__getitem__') or hasattr(obj, '__iter__')) and
                  not isinstance(obj, str)):
                self.referent_visitor(obj)
            elif isinstance(obj, CSPProcess):
                # NOTE(review): CSPProcess does not obviously define
                # .args/.kwargs (threading.Thread mangles these to
                # _Thread__args/_Thread__kwargs) -- verify this branch.
                self.referent_visitor(obj.args + tuple(obj.kwargs.values()))
            elif hasattr(obj, '__dict__'):
                self.referent_visitor(list(obj.__dict__.values()))

    def terminate(self):
        """Terminate only if self is running.

        FIXME: This doesn't work yet...
        """
        if self._Thread__started.is_set():
            _debug('{0} terminating now...'.format(self.getName()))
            threading.Thread._Thread__stop(self)  # Sets an event object

    def __gt__(self, other):
        """Implementation of CSP Seq: ``p > q`` runs p then q."""
        assert _is_csp_type(other)
        seq = Seq(self, other)
        seq.start()
        return seq

    def __mul__(self, n):
        # proc * n: run n copies of this process in sequence.
        # Note: returns None, not the Seq object.
        assert n > 0
        procs = [self]
        for i in range(n - 1):
            procs.append(copy.copy(self))
        Seq(*procs).start()

    def __rmul__(self, n):
        # n * proc: same behaviour as __mul__.
        assert n > 0
        procs = [self]
        for i in range(n - 1):
            procs.append(copy.copy(self))
        Seq(*procs).start()
class CSPProcess(threading.Thread, _CSPOpMixin):
    """Implementation of CSP processes.

    There are two ways to create a new CSP process. Firstly, you can
    use the @process decorator to convert a function definition into a
    CSP Process. Once the function has been defined, calling it will
    return a new CSPProcess object which can be started manually, or
    used in an expression:

    >>> @process
    ... def foo(n):
    ...     print 'n:', n
    ...
    >>> foo(100).start()
    >>> n: 100
    >>> foo(10) // (foo(20),)
    n: 10
    n: 20
    <Par(Par-5, initial)>
    >>>

    Alternatively, you can create a CSPProcess object directly and
    pass a function (and its arguments) to the CSPProcess constructor:

    >>> def foo(n):
    ...     print 'n:', n
    ...
    >>> p = CSPProcess(foo, 100)
    >>> p.start()
    >>> n: 100
    >>>
    """

    def __init__(self, func, *args, **kwargs):
        threading.Thread.__init__(self,
                                  target=func,
                                  args=(args),  # args is already a tuple here
                                  kwargs=kwargs)
        assert inspect.isfunction(func)  # Check we aren't using objects
        assert not inspect.ismethod(func)  # Check we aren't using objects
        _CSPOpMixin.__init__(self)
        # Tell every CSP-typed argument which process encloses it.
        for arg in list(self._Thread__args) + list(self._Thread__kwargs.values()):
            if _is_csp_type(arg):
                arg.enclosing = self
        self.enclosing = None

    def getName(self):
        # Shadows Thread.getName: returns the numeric thread ident
        # instead of the thread's name string.
        return self.ident

    def getPid(self):
        """Return thread ident.

        The name of this method ensures that the CSPProcess interface
        in this module is identical to the one defined in
        os_process.py.
        """
        return self.ident

    def __floordiv__(self, proclist):
        """Run this process in parallel with a list of others.
        """
        par = Par(self, *list(proclist))
        par.start()

    def __str__(self):
        return 'CSPProcess running in TID {0}'.format(self.getName())

    def run(self):  # , event=None):
        """Called automatically when the L{start} methods is called.
        """
        try:
            self._Thread__target(*self._Thread__args, **self._Thread__kwargs)
        except ChannelPoison:
            # Propagate the poison to every channel reachable from the
            # target function's arguments.
            _debug('{0} in {1} got ChannelPoison exception'.format(str(self), self.getPid()))
            self.referent_visitor(self._Thread__args +
                                  tuple(self._Thread__kwargs.values()))
        except KeyboardInterrupt:
            sys.exit()
        except Exception:
            # Report exceptions raised in this thread via sys.excepthook
            # (they would otherwise be silently lost).
            typ, excn, tback = sys.exc_info()
            sys.excepthook(typ, excn, tback)

    def __del__(self):
        """Run the garbage collector automatically on deletion of this
        object.

        This prevents the "Winder Bug" found in tests/winder_bug of
        the distribution, where successive process graphs are created
        in memory and, when the "outer" CSPProcess object returns from
        its .start() method the process graph is not garbage
        collected. This accretion of garbage can cause degenerate
        behaviour which is difficult to debug, such as a program
        pausing indefinitely on Channel creation.
        """
        # gc may already be None during interpreter shutdown.
        if gc is not None:
            gc.collect()
class CSPServer(CSPProcess):
    """Implementation of CSP server processes.

    Not intended to be used in client code. Use @forever instead.
    """

    def __init__(self, func, *args, **kwargs):
        CSPProcess.__init__(self, func, *args, **kwargs)

    def __str__(self):
        return 'CSPServer running in PID {0}'.format(self.getPid())

    def run(self):  # , event=None):
        """Called automatically when the L{start} methods is called.

        The target is expected to be a generator function; each call to
        next() runs one iteration of the server loop.  When a tracer
        (sys.gettrace()) is active, the loop exits and the while/else
        clause runs the target exactly once more, so the CSP tracer can
        produce a finite model.
        """
        try:
            generator = self._Thread__target(*self._Thread__args, **self._Thread__kwargs)
            # Loops forever while no tracer is installed; the else
            # branch runs only when the condition becomes false.
            while sys.gettrace() is None:
                next(generator)
            else:
                # If the tracer is running execute the target only once.
                next(generator)
                logging.info('Server process detected a tracer running.')
            # Be explicit.
            return None
        except ChannelPoison:
            _debug('{0} in {1} got ChannelPoison exception'.format(str(self), self.getPid()))
            self.referent_visitor(self._Thread__args + tuple(self._Thread__kwargs.values()))
            # if self._popen is not None: self.terminate()
        except KeyboardInterrupt:
            sys.exit()
        except Exception:
            typ, excn, tback = sys.exc_info()
            sys.excepthook(typ, excn, tback)
class Alt(_CSPOpMixin):
    """CSP select (OCCAM ALT) process.

    python-csp process will often have access to several different
    channels, or other guard types such as timer guards, and will have
    to choose one of them to read from. For example, in a
    producer/consumer or worker/farmer model, many producer or worker
    processes will be writing values to channels and one consumer or
    farmer process will be aggregating them in some way. It would be
    inefficient for the consumer or farmer to read from each channel
    in turn, as some channels might take longer than others. Instead,
    python-csp provides support for ALTing (or ALTernating), which
    enables a process to read from the first channel (or timer, or
    other guard) in a list to become ready.

    The simplest way to choose between channels (or other guards) is
    to use choice operator: "|", as in the example below:

    >>> @process
    ... def send_msg(chan, msg):
    ...     chan.write(msg)
    ...
    >>> @process
    ... def choice(chan1, chan2):
    ...     # Choice chooses a channel on which to call read()
    ...     print chan1 | chan2
    ...     print chan1 | chan2
    ...
    >>> c1, c2 = Channel(), Channel()
    >>> choice(c1, c2) // (send_msg(c1, 'yes'), send_msg(c2, 'no'))
    yes
    no
    <Par(Par-8, initial)>
    >>>

    Secondly, you can create an Alt object explicitly, and call its
    select() method to perform a channel read on the next available
    channel. If more than one channel is available to read from, then
    an available channel is chosen at random (for this reason, ALTing
    is sometimes called "non-deterministic choice":

    >>> @process
    ... def send_msg(chan, msg):
    ...     chan.write(msg)
    ...
    >>> @process
    ... def alt_example(chan1, chan2):
    ...     alt = Alt(chan1, chan2)
    ...     print alt.select()
    ...     print alt.select()
    ...
    >>> c1, c2 = Channel(), Channel()
    >>> Par(send_msg(c1, 'yes'), send_msg(c2, 'no'), alt_example(c1, c2)).start()
    yes
    no
    >>>

    In addition to the select() method, which chooses an available
    guard at random, Alt provides two similar methods, fair_select()
    and pri_select(). fair_select() will choose not to select the
    previously selected guard, unless it is the only guard
    available. This ensures that no guard will be starved twice in a
    row. pri_select() will select available channels in the order in
    which they were passed to the Alt() constructor, giving a simple
    implementation of guard priority.

    Lastly, Alt() can be used with the repetition operator (*) to
    create a generator:

    >>> @process
    ... def send_msg(chan, msg):
    ...     chan.write(msg)
    ...
    >>> @process
    ... def gen_example(chan1, chan2):
    ...     gen = Alt(chan1, chan2) * 2
    ...     print gen.next()
    ...     print gen.next()
    ...
    >>> c1, c2 = Channel(), Channel()
    >>> Par(send_msg(c1, 'yes'), send_msg(c2, 'no'), gen_example(c1, c2)).start()
    yes
    no
    >>>
    """

    def __init__(self, *args):
        super(Alt, self).__init__()
        for arg in args:
            assert isinstance(arg, Guard)
        self.guards = list(args)   # Guards to choose between.
        self.last_selected = None  # Guard chosen by the most recent select.

    def poison(self):
        """Poison the last selected guard and unlink from the guard list.

        Sets self.last_selected to None.
        """
        _debug(str(type(self.last_selected)))
        self.last_selected.disable()  # Just in case
        try:
            self.last_selected.poison()
        except Exception:
            # The guard may not support poisoning (e.g. a Skip guard).
            pass
        _debug('Poisoned last selected.')
        self.guards.remove(self.last_selected)
        _debug('{0} guards'.format(len(self.guards)))
        self.last_selected = None

    def _preselect(self):
        """Check for special cases when any form of select() is called.

        If no object can be returned from a channel read and no
        exception is raised the return None. Any select() method
        should work like a Channel.read() which must always return a
        value if it does not throw an exception..
        """
        if len(self.guards) == 0:
            raise NoGuardInAlt()
        elif len(self.guards) == 1:
            # With a single guard there is no choice to make: busy-wait
            # until it becomes selectable, then commit to it.
            _debug('Alt Selecting unique guard: {0}'.format(self.guards[0].name))
            self.last_selected = self.guards[0]
            while not self.guards[0].is_selectable():
                self.guards[0].enable()
            return self.guards[0].select()
        return None

    def select(self):
        """Randomly select from ready guards."""
        if len(self.guards) < 2:
            return self._preselect()
        ready = []
        # Enable all guards and poll until at least one becomes ready.
        while len(ready) == 0:
            for guard in self.guards:
                guard.enable()
            _debug('Alt enabled all guards')
            time.sleep(0.01)  # Not sure about this.
            ready = [guard for guard in self.guards if guard.is_selectable()]
        _debug('Alt got {0} items to choose from out of {1}'.format(len(ready), len(self.guards)))
        selected = _RANGEN.choice(ready)
        self.last_selected = selected
        # Roll back every guard that was not chosen.
        for guard in self.guards:
            if guard is not selected:
                guard.disable()
        return selected.select()

    def fair_select(self):
        """Select a guard to synchronise with. Do not select the
        previously selected guard (unless it is the only guard
        available).
        """
        if len(self.guards) < 2:
            return self._preselect()
        ready = []
        while len(ready) == 0:
            for guard in self.guards:
                guard.enable()
            _debug('Alt enabled all guards')
            # NOTE(review): this poll interval (0.1s) differs from the
            # 0.01s used by select() and pri_select() -- confirm whether
            # that is deliberate.
            time.sleep(0.1)  # Not sure about this.
            ready = [guard for guard in self.guards if guard.is_selectable()]
        _debug('Alt got {0} items to choose from, out of {1}'.format(len(ready), len(self.guards)))
        selected = None
        # Avoid starvation: drop the previously selected guard from the
        # candidate list whenever there is an alternative.
        if self.last_selected in ready and len(ready) > 1:
            ready.remove(self.last_selected)
            _debug('Alt removed last selected from ready list')
        selected = _RANGEN.choice(ready)
        self.last_selected = selected
        for guard in self.guards:
            if guard is not selected:
                guard.disable()
        return selected.select()

    def pri_select(self):
        """Select a guard to synchronise with, in order of
        "priority". The guard with the lowest index in the L{guards}
        list has the highest priority.
        """
        if len(self.guards) < 2:
            return self._preselect()
        ready = []
        while len(ready) == 0:
            for guard in self.guards:
                guard.enable()
            _debug('Alt enabled all guards')
            time.sleep(0.01)  # Not sure about this.
            ready = [guard for guard in self.guards if guard.is_selectable()]
        _debug('Alt got {0} items to choose from, out of {1}'.format(len(ready), len(self.guards)))
        # ``ready`` preserves the ordering of self.guards, so the first
        # entry is the highest-priority selectable guard.
        self.last_selected = ready[0]
        # NOTE(review): unlike select()/fair_select(), only the ready
        # guards (bar the winner) are disabled here; enabled-but-unready
        # guards stay enabled -- verify this asymmetry is intended.
        for guard in ready[1:]:
            guard.disable()
        return ready[0].select()

    def __mul__(self, n):
        # alt * n: a generator yielding n successive selections.
        assert n > 0
        for i in range(n):
            yield self.select()

    def __rmul__(self, n):
        # n * alt: same behaviour as __mul__.
        assert n > 0
        for i in range(n):
            yield self.select()
class Par(threading.Thread, _CSPOpMixin):
    """Run CSP processes in parallel.

    There are two ways to run processes in parallel. Firstly, given
    two (or more) processes you can parallelize them with the //
    operator, like this:

    >>> @process
    ... def foo(n):
    ...     print 'n:', n
    ...
    >>> foo(1) // (foo(2), foo(3))
    n: 2
    n: 1
    n: 3
    <Par(Par-5, initial)>
    >>>

    Notice that the // operator takes a CSPProcess on the left hand side
    and a sequence of processes on the right hand side.

    Alternatively, you can create a Par object which is a sort of CSP
    process and start that process manually:

    >>> p = Par(foo(100), foo(200), foo(300))
    >>> p.start()
    n: 100
    n: 300
    n: 200
    >>>
    """

    def __init__(self, *procs, **kwargs):
        """Collect the processes to run, flattening nested Par objects.

        FIXME: only catches shallow nesting.
        """
        super(Par, self).__init__(None)
        self.procs = []
        for proc in procs:
            if isinstance(proc, Par):
                self.procs += proc.procs
            else:
                self.procs.append(proc)
        for proc in self.procs:
            proc.enclosing = self
        _debug('{0} processes in Par:'.format(len(self.procs)))

    def __ifloordiv__(self, proclist):
        """Run this Par in parallel with a list of others.

        Replaces the current process list with ``proclist`` and starts
        the new processes immediately.
        """
        assert hasattr(proclist, '__iter__')
        self.procs = []
        for proc in proclist:
            # FIXME: only catches shallow nesting.
            if isinstance(proc, Par):
                self.procs += proc.procs
            else:
                self.procs.append(proc)
        for proc in self.procs:
            proc.enclosing = self
        _debug('{0} processes added to Par by //:'.format(len(self.procs)))
        self.start()

    def __str__(self):
        return 'CSP Par running in process {0}.'.format(self.getPid())

    def terminate(self):
        """Terminate the execution of this process and its children.
        """
        for proc in self.procs:
            proc.terminate()
        if self._Thread__started.is_set():
            # BUG FIX: this previously read ``Thread._Thread__stop(self)``
            # but only the ``threading`` module is imported, so the bare
            # name Thread raised NameError.
            threading.Thread._Thread__stop(self)

    def getPid(self):
        """Return thread ident.

        The name of this method ensures that the CSPProcess interface
        in this module is identical to the one defined in
        cspprocess.py.
        """
        return self.ident

    def join(self):
        """Join every child process which has been started."""
        for proc in self.procs:
            if proc._Thread__started.is_set():
                proc.join()

    def start(self):
        """Start then synchronize with the execution of parallel processes.

        Return when all parallel processes have returned.
        """
        try:
            for proc in self.procs:
                proc.spawn()
            for proc in self.procs:
                proc.join()
        except ChannelPoison:
            # Propagate poison through everything reachable from the
            # thread's (empty, but present) args/kwargs.
            _debug('{0} got ChannelPoison exception in {1}'.format(str(self), self.getPid()))
            self.referent_visitor(self._Thread__args + tuple(self._Thread__kwargs.values()))
        except KeyboardInterrupt:
            sys.exit()
        except Exception:
            typ, excn, tback = sys.exc_info()
            sys.excepthook(typ, excn, tback)

    def __len__(self):
        return len(self.procs)

    def __getitem__(self, index):
        """Return the process at ``index``.

        Can raise an IndexError if index is not a valid index of
        self.procs.
        """
        return self.procs[index]

    def __setitem__(self, index, value):
        assert isinstance(value, CSPProcess)
        self.procs[index] = value

    def __contains__(self, proc):
        return proc in self.procs
class Seq(threading.Thread, _CSPOpMixin):
    """Run CSP processes sequentially.

    There are two ways to run processes in sequence. Firstly, given
    two (or more) processes you can sequence them with the > operator,
    like this:

    >>> @process
    ... def foo(n):
    ...     print 'n:', n
    ...
    >>> foo(1) > foo(2) > foo(3)
    n: 1
    n: 2
    n: 3
    <Seq(Seq-14, initial)>
    >>>

    Secondly, you can create a Seq object which is a sort of CSP
    process and start that process manually:

    >>> s = Seq(foo(100), foo(200), foo(300))
    >>> s.start()
    n: 100
    n: 200
    n: 300
    >>>
    """

    def __init__(self, *procs):
        """Collect the processes to run, flattening nested Seq objects.

        FIXME: only catches shallow nesting.
        """
        super(Seq, self).__init__()
        self.procs = []
        for proc in procs:
            if isinstance(proc, Seq):
                self.procs += proc.procs
            else:
                self.procs.append(proc)
        for proc in self.procs:
            proc.enclosing = self

    def getPid(self):
        """Return thread ident.

        BUG FIX: added for consistency with CSPProcess and Par --
        __str__ already called self.getPid(), which previously raised
        AttributeError because Seq never defined it.
        """
        return self.ident

    def __str__(self):
        return 'CSP Seq running in process {0}.'.format(self.getPid())

    def start(self):
        """Start this process running.

        Runs each child process to completion, in the order given.
        """
        try:
            for proc in self.procs:
                _CSPOpMixin.start(proc)
                proc.join()
        except ChannelPoison:
            _debug('{0} got ChannelPoison exception in {1}'.format(str(self), self.getPid()))
            self.referent_visitor(self._Thread__args + tuple(self._Thread__kwargs.values()))
            # BUG FIX: removed ``if self._popen is not None:
            # self.terminate()`` -- a leftover from the multiprocessing
            # implementation; threading.Thread has no _popen attribute,
            # so poison handling raised AttributeError here.
        except KeyboardInterrupt:
            sys.exit()
        except Exception:
            typ, excn, tback = sys.exc_info()
            sys.excepthook(typ, excn, tback)
### Guards and channels
class Guard(object):
    """Abstract base class for CSP guard types.

    Concrete guards (channels, timers, Skip, ...) must override
    is_selectable, enable, disable and select.
    """

    _NOT_IMPLEMENTED = 'Must be implemented in subclass'

    def is_selectable(self):
        """Should return C{True} if this guard can be selected by an L{Alt}.
        """
        raise NotImplementedError(self._NOT_IMPLEMENTED)

    def enable(self):
        """Prepare for, but do not commit to a synchronisation.
        """
        raise NotImplementedError(self._NOT_IMPLEMENTED)

    def disable(self):
        """Roll back from an L{enable} call.
        """
        raise NotImplementedError(self._NOT_IMPLEMENTED)

    def select(self):
        """Commit to a synchronisation started by L{enable}.
        """
        raise NotImplementedError(self._NOT_IMPLEMENTED)

    def poison(self):
        """Terminate all processes attached to this guard.
        """
        pass

    def __str__(self):
        return 'CSP Guard: must be subclassed.'

    def __or__(self, other):
        # guard1 | guard2 reads from whichever guard is ready first.
        assert isinstance(other, Guard)
        return Alt(self, other).select()

    def __ror__(self, other):
        assert isinstance(other, Guard)
        return Alt(self, other).select()
class Channel(Guard):
    """CSP Channel objects.

    In python-csp there are two sorts of channel. In JCSP terms these
    are Any2Any, Alting channels. However, each channel creates an
    operating system level pipe. Since this is a file object the
    number of channels a program can create is limited to the maximum
    number of files the operating system allows to be open at any one
    time. To avoid this bottleneck use L{FileChannel} objects, which
    close the file descriptor used for IPC after every read or write
    operations. Read and write operations are, however, over 20 time
    slower when performed on L{FileChannel} objects.

    NOTE(review): in this threaded implementation L{put}/L{get} store
    the value in memory (self._store) rather than in an OS pipe; the
    pipe description above appears to refer to the os_process
    implementation -- confirm.

    Subclasses of C{Channel} must call L{_setup()} in their
    constructor and override L{put}, L{get}, L{__del__}.

    A CSP channel can be created with the Channel class:

    >>> c = Channel()
    >>>

    Each Channel object should have a unique name in the network:

    >>> print c.name
    1ca98e40-5558-11df-8e5b-002421449824
    >>>

    The Channel can then be passed as an argument to any CSP process
    and then be used either to read (using the .read() method) or to
    write (using the .write() method). For example:

    >>> @process
    ... def send(cout, data):
    ...     cout.write(data)
    ...
    >>> @process
    ... def recv(cin):
    ...     print 'Got:', cin.read()
    ...
    >>> c = Channel()
    >>> send(c, 100) // (recv(c),)
    Got: 100
    <Par(Par-3, initial)>
    >>>
    """

    TRUE = 1
    FALSE = 0

    def __init__(self):
        self.name = uuid.uuid1()  # Unique name for this channel.
        self._wlock = None  # Write lock protects from races between writers.
        self._rlock = None  # Read lock protects from races between readers.
        self._plock = None  # Protects the poison flag.
        self._available = None  # Released if writer has made data available.
        self._taken = None  # Released if reader has taken data.
        self._is_alting = None  # True if engaged in an Alt synchronisation.
        self._is_selectable = None  # True if can be selected by an Alt.
        self._has_selected = None  # True if already been committed to select.
        self._store = None  # Holds value transferred by channel
        self._poisoned = None
        self._setup()
        super(Channel, self).__init__()
        _debug('Channel created: {0}'.format(self.name))

    def _setup(self):
        """Set up synchronisation.

        MUST be called in __init__ of this class and all subclasses.
        """
        # Process-safe synchronisation.
        self._wlock = threading.RLock()  # Write lock.
        self._rlock = threading.RLock()  # Read lock.
        self._plock = threading.Lock()  # Fix poisoning.
        # Two zero-initialised semaphores implement the rendezvous: a
        # writer releases _available then blocks on _taken; a reader
        # blocks on _available then releases _taken.
        self._available = threading.Semaphore(0)
        self._taken = threading.Semaphore(0)
        # Process-safe synchronisation for CSP Select / Occam Alt.
        self._is_alting = False
        self._is_selectable = False
        # Kludge to say a select has finished (to prevent the channel
        # from being re-enabled). If values were really process safe
        # we could just have writers set _is_selectable and read that.
        self._has_selected = False

    def put(self, item):
        """Put C{item} on a process-safe store.
        """
        self.checkpoison()
        self._store = item
        # self._store = pickle.dumps(item, protocol=1)

    def get(self):
        """Get a Python object from a process-safe store.
        """
        self.checkpoison()
        item = self._store
        # item = pickle.loads(self._store)
        self._store = None
        return item

    def is_selectable(self):
        """Test whether Alt can select this channel.
        """
        _debug('Alt THINKS _is_selectable IS: {0}'.format(str(self._is_selectable)))
        self.checkpoison()
        return self._is_selectable

    def write(self, obj):
        """Write a Python object to this channel.

        Blocks until a reader has taken the object (synchronous
        rendezvous semantics).
        """
        self.checkpoison()
        _debug('+++ Write on Channel {0} started.'.format(self.name))
        with self._wlock:  # Protect from races between multiple writers.
            # If this channel has already been selected by an Alt then
            # _has_selected will be True, blocking other readers. If a
            # new write is performed that flag needs to be reset for
            # the new write transaction.
            self._has_selected = False
            # Make the object available to the reader.
            self.put(obj)
            # Announce the object has been released to the reader.
            self._available.release()
            # NOTE(review): _Semaphore__value is a name-mangled Python 2
            # internal of threading.Semaphore; it does not exist under
            # that name in Python 3.
            _debug('++++ Writer on Channel {0}: _available: {1} _taken: {2}.'.format(self.name, self._available._Semaphore__value, self._taken._Semaphore__value))
            # Block until the object has been read.
            self._taken.acquire()
            # Remove the object from the channel.
            _debug('+++ Write on Channel {0} finished.'.format(self.name))

    def read(self):
        """Read (and return) a Python object from this channel.
        """
        self.checkpoison()
        _debug('+++ Read on Channel {0} started.'.format(self.name))
        with self._rlock:  # Protect from races between multiple readers.
            # Block until an item is in the Channel.
            _debug('++++ Reader on Channel {0}: _available: {1} _taken: {2}.'.format(self.name, self._available._Semaphore__value, self._taken._Semaphore__value))
            self._available.acquire()
            # Get the item.
            obj = self.get()
            # Announce the item has been read.
            self._taken.release()
        _debug('+++ Read on Channel {0} finished.'.format(self.name))
        return obj

    def enable(self):
        """Enable a read for an Alt select.

        MUST be called before L{select()} or L{is_selectable()}.
        """
        self.checkpoison()
        # Prevent re-synchronization.
        if (self._has_selected or self._is_selectable):
            # Be explicit.
            return None
        self._is_alting = True
        with self._rlock:
            # Attempt to acquire _available without blocking: the
            # channel is selectable only if a writer has already made
            # data available.
            time.sleep(0.00001)  # Won't work without this -- why?
            if self._available.acquire(blocking=False):
                self._is_selectable = True
            else:
                self._is_selectable = False
        _debug('Enable on guard {0} _is_selectable: {1} _available: {2}'.format(self.name, str(self._is_selectable), str(self._available)))

    def disable(self):
        """Disable this channel for Alt selection.

        MUST be called after L{enable} if this channel is not selected.
        """
        self.checkpoison()
        self._is_alting = False
        if self._is_selectable:
            with self._rlock:
                # Give back the _available token taken by enable().
                self._available.release()
            self._is_selectable = False

    def select(self):
        """Complete a Channel read for an Alt select.
        """
        self.checkpoison()
        _debug('channel select starting')
        assert self._is_selectable == True
        with self._rlock:
            # NOTE(review): these _debug messages say 'Writer' but
            # select() runs on the reading side of the channel.
            _debug('got read lock on channel {0} _available: {1}'.format(self.name, str(self._available._Semaphore__value)))
            # Obtain object on Channel.
            obj = self.get()
            _debug('Writer got obj')
            # Notify write() that object is taken.
            self._taken.release()
            _debug('Writer released _taken')
            # Reset flags to ensure a future read / enable / select.
            self._is_selectable = False
            self._is_alting = False
            self._has_selected = True
            _debug('reset bools')
        # A poisoned peer may have sent the _POISON sentinel; spread
        # the poison and abort the read.
        if obj == _POISON:
            self.poison()
            raise ChannelPoison()
        return obj

    def __str__(self):
        return 'Channel using OS pipe for IPC.'

    def checkpoison(self):
        # Raise ChannelPoison if this channel has been poisoned; called
        # at the top of every public operation.
        with self._plock:
            if self._poisoned:
                _debug('{0} is poisoned. Raising ChannelPoison()'.format(self.name))
                raise ChannelPoison()

    def poison(self):
        """Poison a channel causing all processes using it to terminate.

        A set of communicating processes can be terminated by
        "poisoning" any of the channels used by those processes. This
        can be achieved by calling the poison() method on any
        channel. For example:

        >>> @process
        ... def send5(cout):
        ...     for i in xrange(5):
        ...         print 'send5 sending:', i
        ...         cout.write(i)
        ...         time.sleep(random.random() * 5)
        ...     return
        ...
        >>> @process
        ... def recv(cin):
        ...     for i in xrange(5):
        ...         data = cin.read()
        ...         print 'recv got:', data
        ...         time.sleep(random.random() * 5)
        ...     return
        ...
        >>> @process
        ... def interrupt(chan):
        ...     time.sleep(random.random() * 7)
        ...     print 'Poisoning channel:', chan.name
        ...     chan.poison()
        ...     return
        ...
        >>> doomed = Channel()
        >>> send5(doomed) // (recv(doomed), interrupt(doomed))
        send5 sending: 0
        recv got: 0
        send5 sending: 1
        recv got: 1
        send5 sending: 2
        recv got: 2
        send5 sending: 3
        recv got: 3
        send5 sending: 4
        recv got: 4
        Poisoning channel: 5c906e38-5559-11df-8503-002421449824
        <Par(Par-5, initial)>
        >>>
        """
        with self._plock:
            self._poisoned = True
            # Avoid race conditions on any waiting readers / writers.
            self._available.release()
            self._taken.release()
class FileChannel(Channel):
    """Channel objects using files on disk.

    C{FileChannel} objects close their files after each read or write
    operation. The advantage of this is that client code can create as
    many C{FileChannel} objects as it wishes (unconstrained by the
    operating system's maximum number of open files). In return there
    is a performance hit -- reads and writes are around 10 x slower on
    C{FileChannel} objects compared to L{Channel} objects.
    """

    def __init__(self):
        self.name = uuid.uuid1()
        self._wlock = None  # Write lock.
        self._rlock = None  # Read lock.
        self._available = None
        self._taken = None
        self._is_alting = None
        self._is_selectable = None
        self._has_selected = None
        # BUG FIX: _poisoned was never initialised here, so the
        # inherited checkpoison() raised AttributeError instead of
        # working as a poison check.
        self._poisoned = None
        # Process-safe store: a temporary file holding the pickled data.
        file_d, self._fname = tempfile.mkstemp()
        os.close(file_d)
        self._setup()

    def put(self, item):
        """Pickle C{item} and write it to the backing file.

        BUG FIX: previously used the Python 2-only ``file()`` builtin
        in text mode; ``open()`` in binary mode works on both Python 2
        and 3 and is required because pickled data is bytes.
        """
        with open(self._fname, 'wb') as file_d:
            file_d.write(pickle.dumps(item, protocol=1))

    def get(self):
        """Read, unpickle and return the object in the backing file.

        Spins until the writer has actually written some data.
        """
        stored = b''
        while stored == b'':
            with open(self._fname, 'rb') as file_d:
                stored = file_d.read()
        # Unlinking here ensures that FileChannel objects exhibit the
        # same semantics as Channel objects.
        os.unlink(self._fname)
        return pickle.loads(stored)

    def __del__(self):
        try:
            # Necessary if the Channel has been deleted by poisoning.
            os.unlink(self._fname)
        except Exception:
            # Best effort only: the file may already be gone, or
            # __init__ may not have completed.
            pass

    def __str__(self):
        return 'Channel using files for IPC.'
### Function decorators
def process(func):
    """Decorator to turn a function into a CSP process.

    There are two ways to create a new CSP process. Firstly, you can
    use the @process decorator to convert a function definition into a
    CSP Process. Once the function has been defined, calling it will
    return a new CSPProcess object which can be started manually, or
    used in an expression:

    >>> @process
    ... def foo(n):
    ...     print 'n:', n
    ...
    >>> foo(100).start()
    >>> n: 100
    >>> foo(10) // (foo(20),)
    n: 10
    n: 20
    <Par(Par-5, initial)>
    >>>

    Alternatively, you can create a CSPProcess object directly and pass a
    function (and its arguments) to the CSPProcess constructor:

    >>> def foo(n):
    ...     print 'n:', n
    ...
    >>> p = CSPProcess(foo, 100)
    >>> p.start()
    >>> n: 100
    >>>
    """
    @wraps(func)
    def make_process(*args, **kwargs):
        """Wrap the target function and its arguments in a CSPProcess."""
        return CSPProcess(func, *args, **kwargs)
    return make_process
def forever(func):
    """Decorator to turn a generator function into a CSP server process.

    It is preferable to use this rather than @process, to enable the
    CSP tracer to terminate correctly and produce a CSP model, or
    other debugging information.

    A server process is one which runs in an infinite loop. You can
    create a "normal" process which runs in an infinite loop, but by
    using server processes you allow the python-csp debugger to
    correctly generate information about your programs.

    There are two ways to create a new CSP server process. Firstly,
    you can use the @forever decorator to convert a generator into a
    CSPServer object. Once the function has been defined, calling it
    will return a new CSPServer object which can be started manually,
    or used in an expression:

    >>> @forever
    ... def integers():
    ...     n = 0
    ...     while True:
    ...         print n
    ...         n += 1
    ...         yield
    ...
    >>> integers().start()
    >>> 0
    1
    2
    3
    4
    5
    ...
    KeyboardInterrupt
    >>>

    Alternatively, you can create a CSPServer object directly and pass a
    function (and its arguments) to the CSPServer constructor:

    >>> def integers():
    ...     n = 0
    ...     while True:
    ...         print n
    ...         n += 1
    ...         yield
    ...
    >>> i = CSPServer(integers)
    >>> i.start()
    >>> 0
    1
    2
    3
    4
    5
    ...
    KeyboardInterrupt
    """
    @wraps(func)
    def make_server(*args, **kwargs):
        """Wrap the target generator function in a CSPServer."""
        return CSPServer(func, *args, **kwargs)
    return make_server
### List of CSP based types (class names). Used by _is_csp_type.
# All CSP process classes defined by this module.
_CSPTYPES = [CSPProcess, Par, Seq, Alt]


def _is_csp_type(obj):
    """Return True if obj is an instance of any CSP process type."""
    return any(isinstance(obj, csp_type) for csp_type in _CSPTYPES)
def _nop():
pass
class Skip(CSPProcess, Guard):
    """Guard which will always return C{True}. Useful in L{Alt}s where
    the programmer wants to ensure that L{Alt.select} will always
    synchronise with at least one guard.

    Skip is a built in guard type that can be used with Alt
    objects. Skip() is a default guard which is always ready and has
    no effect. This is useful where you have a loop which calls
    select(), pri_select() or fair_select() on an Alt object
    repeatedly and you do not wish the select statement to block
    waiting for a channel write, or other synchronisation. The
    following is a trivial example of an Alt which uses Skip():

    >>> alt = Alt(Skip())
    >>> for i in xrange(5):
    ...     print alt.select()
    ...
    Skip
    Skip
    Skip
    Skip
    Skip
    >>>

    Where you have an Alt() object which mixes Skip() with other guard
    types, be sure to complete all necessary channel reads or other
    synchronisations, otherwise your code will hang.
    """

    def __init__(self):
        # Initialise both bases explicitly; the process target does
        # nothing (_nop).
        Guard.__init__(self)
        CSPProcess.__init__(self, _nop)
        self.name = '__Skip__'

    def is_selectable(self):
        """A Skip guard is ready at all times."""
        return True

    def enable(self):
        """Nothing to prepare; has no effect."""
        return None

    def disable(self):
        """Nothing to roll back; has no effect."""
        return None

    def select(self):
        """Synchronise trivially, returning the string 'Skip'."""
        return 'Skip'

    def __str__(self):
        return 'Skip guard is always selectable / process does nothing.'
| Python |
#!/usr/bin/env python
"""
Builtin guard types. For builtin processes see csp.builtins.
Copyright (C) Sarah Mount, 2010.
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from __future__ import absolute_import
import os
import multiprocessing
import threading
import time
from .csp import *
__author__ = 'Sarah Mount <s.mount@wlv.ac.uk>'
__date__ = 'May 2010'
### Names exported by this module
__all__ = ['Timer', 'Barrier']
class Timer(Guard):
    """Guard which only commits to synchronisation when a timer has expired.

    A Timer is a guard type, like a channel or a Skip.  It supports
    two styles of use: blocking the current process for a fixed period
    (sleep(), analogous to time.sleep in the standard library), or
    becoming selectable by an Alt object a given number of seconds in
    the future (set_alarm()).  For example:

    >>> timer = Timer()
    >>> timer.sleep(5)     # sleep for 5 seconds
    >>>
    >>> alt = Alt(timer)
    >>> timer.set_alarm(3) # become selectable 3 seconds from now
    >>> alt.select()       # will wait 3 seconds
    >>>
    """

    def __init__(self):
        super(Timer, self).__init__()
        self.now = time.time()
        self.name = 'Timer guard created at:' + str(self.now)
        # Absolute time at which this guard becomes selectable, or
        # None when no alarm is set (always selectable).
        self.alarm = None

    def set_alarm(self, timeout):
        """Become selectable `timeout` seconds from now."""
        self.now = time.time()
        self.alarm = self.now + timeout

    def is_selectable(self):
        """Ready when no alarm is set or the alarm time has passed."""
        self.now = time.time()
        return self.alarm is None or self.now >= self.alarm

    def read(self):
        """Return the current time."""
        self.now = time.time()
        return self.now

    def sleep(self, timeout):
        """Put this process to sleep for `timeout` seconds."""
        time.sleep(timeout)

    def enable(self):
        """Timers need no enabling; readiness is purely time based."""
        pass

    def disable(self):
        """Timers need no disabling."""
        pass

    def select(self):
        """Synchronising with a timer yields no value."""
        pass
class AbstractBarrier(object):
    """Barrier synchronisation for a set of participant processes.

    Each participant calls synchronise(); all participants block until
    the last one arrives, at which point every waiter is released and
    the barrier resets for the next cycle.  Subclasses MUST assign
    self.lock a Condition object appropriate to their concurrency
    model (threading.Condition or multiprocessing.Condition) before
    the barrier is used.
    """

    def __init__(self, participants=0):
        assert participants >= 0
        # Set the counters directly rather than calling reset(): at
        # this point self.lock is still None (subclasses assign the
        # Condition only after this constructor returns), so reset()
        # -- which acquires the lock -- would raise.  The original
        # implementation called reset() here and therefore crashed on
        # construction of every subclass.
        self.participants = participants  # Number of enrolled processes.
        self.not_ready = participants     # Processes yet to synchronise.
        self.lock = None                  # MUST be overridden in subclass.

    def reset(self, participants):
        """Reset the barrier to expect `participants` processes."""
        assert participants >= 0
        with self.lock:
            self.participants = participants
            self.not_ready = participants

    def enrol(self):
        """Add the calling process to the set of participants."""
        with self.lock:
            self.participants += 1
            self.not_ready += 1

    def retire(self):
        """Remove the calling process from the set of participants,
        releasing current waiters if it was the last process the
        barrier was still waiting for.
        """
        with self.lock:
            self.participants -= 1
            self.not_ready -= 1
            if self.not_ready == 0:
                self.not_ready = self.participants
                # notify_all: the camelCase alias notifyAll used by the
                # original is missing from multiprocessing.Condition on
                # Python 3.
                self.lock.notify_all()
            assert self.not_ready >= 0

    def synchronise(self):
        """Block until every enrolled participant has synchronised."""
        with self.lock:
            self.not_ready -= 1
            if self.not_ready > 0:
                self.lock.wait()
            else:
                self.not_ready = self.participants
                self.lock.notify_all()

    def synchronise_withN(self, n):
        """Only synchronise when exactly `n` participants are enrolled."""
        with self.lock:
            if self.participants != n:
                self.lock.wait()
            self.not_ready -= 1
            if self.not_ready > 0:
                self.lock.wait()
            else:
                self.not_ready = self.participants
                self.lock.notify_all()
# TODO: Move these two classes to the modules corresponding to
# their CSP process implementation (i. e. os_process/os_thread).
class BarrierThreading(AbstractBarrier):
    """Barrier whose lock is a threading.Condition, for use with the
    OS-thread implementation of python-csp processes.
    """

    def __init__(self):
        AbstractBarrier.__init__(self)
        self.lock = threading.Condition()
class BarrierProcessing(AbstractBarrier):
    """Barrier whose lock is a multiprocessing.Condition, for use with
    the OS-process implementation of python-csp processes.
    """

    def __init__(self):
        AbstractBarrier.__init__(self)
        self.lock = multiprocessing.Condition()
# Pick the Barrier flavour matching the active CSP implementation:
# thread-backed barriers for 'os_thread', process-backed otherwise.
if CSP_IMPLEMENTATION == 'os_thread':
    Barrier = BarrierThreading
else:
    Barrier = BarrierProcessing
#Barrier.__doc__ = """
#
#"""
| Python |
#!/usr/bin/env python
"""Design pattern support for python-csp.
Copyright (C) Sarah Mount, 2010.
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
"""
from __future__ import absolute_import
from .csp import *
__author__ = 'Sarah Mount <s.mount@wlv.ac.uk>'
__date__ = 'May 2010'
__all__ = ['TokenRing']
class TokenRing(Par):
    """Run `size` copies of `func` in parallel, connected in a ring.

    Node i reads from the channel it shares with node i-1 and writes
    to the channel it shares with node i+1; node 0's input is the last
    channel in the list (Python's negative indexing), which closes the
    ring.  `numtoks` is passed to every node as its `tokens` keyword
    argument.
    """

    def __init__(self, func, size, numtoks=1):
        self.chans = [Channel() for _ in range(size)]
        self.procs = []
        for node in range(size):
            self.procs.append(func(index=node,
                                   tokens=numtoks,
                                   numnodes=size,
                                   inchan=self.chans[node - 1],
                                   outchan=self.chans[node]))
        super(TokenRing, self).__init__(*self.procs)
| Python |
#!/usr/bin/env python
"""
Simple representation of CSP models, with graphviz and FDR2 output.
Copyright (C) Sarah Mount, 2010.
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
__author__ = 'Sarah Mount <s.mount@wlv.ac.uk>'
__date__ = '2010-05-16'
class CSPModel(object):
    """Abstract base class for elements of a CSP model.

    Subclasses must override fdr() and dot() to serialise themselves
    for the FDR2 model checker and the graphviz toolset respectively.
    """

    def __init__(self):
        return

    def fdr(self):
        """Return an FDR2 representation of this CSP model.

        Should be suitable for using as an input to the FDR2 tool.
        MUST be overridden in subclasses.
        """
        raise NotImplementedError('fdr() MUST be overridden in subclasses.')

    def dot(self):
        """Return a graphviz representation of this CSP model.

        Should be suitable for using as an input to the graphviz toolset.
        MUST be overridden in subclasses.
        """
        # BUG FIX: the original message wrongly referred to fdr().
        raise NotImplementedError('dot() MUST be overridden in subclasses.')
class Process(CSPModel):
    """Model of a single named CSP process."""

    def __init__(self, name):
        CSPModel.__init__(self)
        self.name = name

    def fdr(self):
        # NOTE: this should emit a full FDR2 process definition, not
        # just an upper-cased name (flagged as WRONG in the original).
        upper_name = self.name.upper()
        return upper_name
class Par(CSPModel):
    """Model of processes composed in parallel (FDR2 interleaving)."""

    def __init__(self, procs):
        CSPModel.__init__(self)
        self.procs = procs

    def fdr(self):
        """Return the FDR2 interleaving (' ||| ') of all contained
        processes; the empty string for an empty composition.
        """
        # BUG FIX: the original concatenated the process object itself
        # (' ||| ' + proc), raising TypeError for every composition of
        # two or more processes; serialise each with its fdr() method.
        return ' ||| '.join(proc.fdr() for proc in self.procs)
class Seq(CSPModel):
    """Model of processes composed in sequence."""

    def __init__(self, procs):
        CSPModel.__init__(self)
        self.procs = procs
        return

    def fdr(self):
        """Return the FDR2 sequential composition (' ; ') of all
        contained processes; the empty string for an empty composition.
        """
        # BUG FIX: the original appended the process object itself
        # (' ; ' + proc), raising TypeError; use each process's fdr().
        return ' ; '.join(proc.fdr() for proc in self.procs)
class Channel(CSPModel):
    """Model of a single named CSP channel."""

    def __init__(self, name):
        CSPModel.__init__(self)
        self.name = name
        return

    def fdr(self):
        """Return the FDR2 channel declaration for this channel."""
        return 'channel ' + self.name + '\n'
# def write_dotfile(filename='procgraph.dot'):
# global nodes
# global arcs
# dot = "graph pythoncsp {\n node [shape=ellipse];"
# for proc in nodes:
# dot += " " + str(proc) + ";"
# dot += "\n"
# for channel in arcs:
# for i in xrange(len(arcs[channel])):
# for j in xrange(i+1, len(arcs[channel])):
# dot += (str(arcs[channel][i]) + " -- " +
# str(arcs[channel][j]) +
# " [ label=" + str(channel) + " ];\n")
# dot += ' label = "\\n\\nCSP Process Relationships\\n";\n'
# dot += " fontsize=20;\n}"
# fh = open(filename)
# fh.write(dot)
# fh.close()
# return
# def write_png(infile='procgraph.dot', outfile='procgraph.png'):
# os.system('neato -v -Goverlap=-1 -Gsplines=true -Gsep=.1 -Gstart=-1000 Gepsilon=.0000000001 -Tpng ' + infile + ' -o' + outfile)
if __name__ == '__main__':
print ( 'WRITE SOME TESTS' )
| Python |
#!/usr/bin/env python
"""
Tracer for python-csp, intended for generating models of a python-csp
program, including process graphs, CSP traces and FDR2 models.
Some source from pycallgraph.py is used here. pycallgraph is
published under the GNU General Public License.
U{http://pycallgraph.slowchop.com/} (C) Gerald Kaszuba 2007
Copyright (C) Sarah Mount, 2009-10.
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
"""
__author__ = 'Sarah Mount <s.mount@wlv.ac.uk>'
__credits__ = 'Sarah Mount, Gerald Kaszuba'
__date__ = '2010-05-16'
import exstatic.icode
import inspect
#import linecache
import os
import sys
import types
from csp.csp import *
from distutils import sysconfig
#from exstatic.stack import Stack
from contextlib import contextmanager
# Names exported by this module
__all__ = ['start_trace', 'stop_trace', 'csptrace']
DEBUG = True
tracer = None
callgraph = []
# Functions to ignore when tracing.
#
# TODO: This is rather brittle and should be replaced, possibly with
# the sort of "globbing" system that pycallgraph uses.
# Or with a list of function calls to notice.
ignore = ('csp.tracer.tracer.start_trace',
'csp.tracer.tracer.stop_trace',
'csp.tracer.tracer.csptrace',
'<module>',
'Synchronized.getvalue',
'Synchronized.setvalue',
'csp.guards',
'csp.guards.Skip',
'csp.guards.Skip.__init__',
'csp.guards.Skip.enable',
'csp.guards.Skip.disable',
'csp.guards.Skip.select',
'csp.guards.Skip.is_selectable'
'csp.guards.Timer',
'csp.guards.Timer.__init__',
'csp.guards.AbstractBarrier',
'csp.guards.BarrierThreading',
'csp.guards.BarrierProcessing',
'csp.cspprocess.process', # Decorator
'csp.cspprocess.forever', # Decorator
'csp.cspprocess._call',
'csp.cspprocess._debug',
'csp.cspprocess._is_csp_type',
'csp.cspprocess.CSPProcess.__init__',
'csp.cspprocess.CSPProcess.spawn',
'csp.cspprocess.CSPProcess.run',
'csp.cspprocess.CSPProcess.__del__',
'csp.cspprocess.CSPServer.__init__',
'csp.cspprocess.CSPServer.spawn',
'csp.cspprocess.CSPServer.run',
'csp.cspprocess.CSPServer.__del__',
'csp.cspprocess.Par.__init__',
'csp.cspprocess.Seq.__init__',
'csp.cspprocess.Alt.__init__',
'csp.cspprocess.Channel._setup',
'csp.cspprocess.Channel.__del__',
'csp.cspprocess.Channel.__init__',
'csp.cspprocess.Channel.put',
'csp.cspprocess.Channel.get',
'csp.cspprocess.Channel.enable',
'csp.cspprocess.Channel.disable',
'csp.cspprocess.Channel.select',
'csp.cspprocess.Channel.is_selectable',
'csp.cspprocess.Channel.checkpoison',
)
@contextmanager
def csptrace():
    """Context manager which traces only the enclosed block of code,
    independently from the rest of the program.  Use with the Python
    "with" statement:

    with csptrace():
        # Code here will be traced.

    The tracer is stopped even if the traced block raises, so an
    exception inside the "with" body cannot leave tracing enabled
    (the original version skipped stop_trace() on error).
    """
    start_trace()
    try:
        yield
    finally:
        stop_trace()
def reset_trace():
    """Reset any module-level tracing state.

    TODO: Writeme!
    """
    return None
def start_trace():
    """Start the tracer.

    Installs a fresh Tracer instance as the interpreter-wide trace
    function via sys.settrace, then resets module-level trace state.
    """
    global tracer
    tracer = Tracer()
    sys.settrace(tracer.trace)
    reset_trace()
    return
def stop_trace():
    """Stop tracing by clearing the interpreter-wide trace function."""
    sys.settrace(None)
    return None
class memoized(object):
    """Decorator that caches a function's return value each time it is
    called.  If called later with the same arguments, the cached value
    is returned, and not re-evaluated.

    Calls with unhashable arguments (e.g. a list) cannot be cached and
    fall through to the wrapped function.

    From the Python decorator library:
    U{http://wiki.python.org/moin/PythonDecoratorLibrary#Memoize}
    """

    def __init__(self, func):
        self.func = func   # The wrapped callable.
        self.cache = {}    # Maps positional-argument tuples to results.

    def __call__(self, *args):
        try:
            if args not in self.cache:
                self.cache[args] = self.func(*args)
            return self.cache[args]
        except TypeError:
            # Uncachable -- for instance, passing a list as an argument.
            # Better to not cache than to blow up entirely.
            return self.func(*args)

    def __repr__(self):
        """Return the wrapped function's docstring, or its repr.

        BUG FIX: the original returned func.__doc__ unconditionally;
        for an undocumented function __doc__ is None, which makes
        repr() raise TypeError.
        """
        return self.func.__doc__ or repr(self.func)
@memoized
def is_safe_type(ttype):
    """Test if a type is a "safe" type that we can print and
    process. Functions and classes are not, in general, simple types
    as they can contain side effecting code.

    TODO: Strictly, sequence types can contain objects which
    side-effect, although for now we call them safe. A better thing
    would be to look inside each sequence and decide whether or not
    its members are safe types.
    """
    # BUG FIX: the original listed types.DictProxyType, which was
    # removed in Python 3 (this file is otherwise Python-3 converted:
    # memoryview, range, bytes); the modern equivalent is
    # types.MappingProxyType.  Duplicate int/dict/str entries left
    # over from the long/unicode conversion are also removed.
    safe = (type(None),
            type,
            bool,
            int,
            float,
            complex,
            bytes,
            str,
            tuple,
            list,
            dict,
            # Note that lambdas are forbidden from side-effecting
            # by the language specification.
            types.LambdaType,
            types.GeneratorType,
            types.CodeType,
            types.ModuleType,
            range,
            slice,
            type(Ellipsis),
            memoryview,
            types.MappingProxyType,
            type(NotImplemented))
    return ttype in safe
@memoized
def _is_module_stdlib(file_name):
    """Returns True if the file_name is in the lib directory.

    Source adapted from pycallgraph.py
    U{http://pycallgraph.slowchop.com/} Copyright (GPLv2) Gerald
    Kaszuba 2007
    """
    lib_path = sysconfig.get_python_lib(standard_lib=True)
    head, tail = os.path.split(lib_path)
    # A site-packages path means the stdlib root is one level up.
    if tail == 'site-packages':
        lib_path = head
    return file_name.lower().startswith(lib_path.lower())
def _pprint_arg(argname, value):
"""Pretty-print an argument to a function, including its type.
A special case here is the arguments to some csp types which are
lists of CSPProcess objects.
"""
if argname: output = argname + '='
else: output = ''
# Case 1: csp type with a list of processes as an argument.
if value[0] is not None and hasattr(value[0], '__iter__'):
output += '['
for val in value:
output += _pprint_arg('', val) + ', '
output = output[:-2] + ']'
# Case 2: single argument with original name with type.
elif value[0] is not None:
output += repr(value[0]) + ':' + str(value[1])
# Case 3: single anonymous argument with type.
else:
output += ':' + str(value[1])
return output
def _pprint_func(func_name, args):
    """Pretty print a function call with its argument dictionary.

    Used for debugging.
    """
    # No arguments: just the call parentheses.
    if not args:
        return func_name + '()'
    rendered = [_pprint_arg(name, args[name]) for name in args]
    return func_name + '(' + ', '.join(rendered) + ')'
@memoized
def _reverse_lookup(value, dictionary):
    """Reverse lookup for dictionaries.

    Given a value, returns the first key mapping to that value in
    `dictionary`, or None if no key maps to it.
    """
    for key, val in dictionary.items():
        if val == value:
            return key
    return None
def _find_name_in_outer_scope(bound_value, frame):
    """Look down the stack to find the original name given to a value.

    Walks from `frame` towards outer frames, checking locals then
    globals at each level, and returns the name found in the
    outermost frame that binds `bound_value` ('' if none is found).
    """
    defined_name = ''
    while frame:
        try:
            name = _reverse_lookup(bound_value, frame.f_locals)
            if name is not None:
                defined_name = name
        except AttributeError:
            pass
        try:
            name = _reverse_lookup(bound_value, frame.f_globals)
            if name is not None:
                defined_name = name
        except AttributeError:
            pass
        frame = frame.f_back
    return defined_name
def _get_arguments(param, frame):
    """Get the original name of a value passed as a function argument.

    Given a formal parameter name and a stack frame, return the
    variable name and type of the formal parameter where it was first
    defined. If the argument is a constant, return the constant if it
    is safe to do so and '' otherwise.

    Returns a single (name, type) pair, or a list of such pairs when
    the argument is a Par/Seq/Alt holding several processes or guards.

    NOTE(review): the isinstance checks below reference the dotted
    name `csp.cspprocess`, but this module only does
    `from csp.csp import *` -- presumably `csp` is importable at call
    time; confirm, otherwise these branches raise NameError.
    """
    arg, ty = None, None
    code = frame.f_code
    # Ordinary parameters live in the frame's locals; free variables
    # are looked up in the frame's globals instead.
    if param not in code.co_freevars:
        bound_value = frame.f_locals[param]
    else:
        bound_value = frame.f_globals[param]
    if bound_value is None:
        arg, ty = (None, type(None))
    # Deal separately with core csp classes.
    elif isinstance(bound_value, csp.cspprocess.CSPServer):
        arg, ty = (bound_value._target.__name__, type(bound_value))
    elif isinstance(bound_value, csp.cspprocess.CSPProcess):
        arg, ty = (bound_value._target.__name__, type(bound_value))
    # Deal with PARallel processes.
    elif isinstance(bound_value, csp.cspprocess.Par):
        targets = []
        for proc in bound_value.procs:
            targets.append((proc._target.__name__, type(proc)))
        return targets
    elif isinstance(bound_value, csp.cspprocess.Seq):
        targets = []
        for proc in bound_value.procs:
            targets.append((proc._target.__name__, type(proc)))
        return targets
    elif isinstance(bound_value, csp.cspprocess.Alt):
        targets = []
        for guard in bound_value.guards:
            # Guards carry no _target; recover their original names
            # from outer scopes instead.
            targets.append((_find_name_in_outer_scope(guard, frame),
                            type(guard)))
        return targets
    # Deal with types defined outside the csp library.
    elif bound_value in list(frame.f_locals.values()):
        if is_safe_type(type(bound_value)):
            arg, ty = (bound_value, type(bound_value))
        else:
            arg, ty = (_find_name_in_outer_scope(bound_value, frame),
                       type(bound_value))
    elif bound_value in list(frame.f_globals.values()):
        if is_safe_type(type(bound_value)):
            arg, ty = (bound_value, type(bound_value))
        else:
            arg, ty = (_find_name_in_outer_scope(bound_value, frame),
                       type(bound_value))
    else:
        arg, ty = (_find_name_in_outer_scope(bound_value, frame),
                   type(bound_value))
    return (arg, ty)
class Tracer(object):
    """Trace-function object for sys.settrace.

    The bound trace() method dispatches each interpreter event
    ('call', 'line', 'exception', 'return') to a dedicated handler.
    Only 'call' events are currently processed: each interesting call
    is appended to the module-level `callgraph` list.
    """
    def __init__(self):
        pass
    def trace(self, frame, event, arg):
        """Dispatch a trace event to the handler for its event type."""
        if event == 'line':
            self.trace_line(frame, event, arg)
        elif event == 'exception':
            self.trace_exn(frame, event, arg)
        elif event == 'return':
            self.trace_return(frame, event, arg)
        elif event == 'call':
            self.trace_call(frame, event, arg)
    def trace_call(self, frame, event, arg):
        """Trace a function call.

        Builds a human-readable dotted name (module.class.function)
        for the called code object, skips standard-library and
        csp-internal calls (the `ignore` tuple), then records the call
        with its resolved argument names and types in the module-level
        `callgraph`.
        """
        code = frame.f_code
        # Stores all the parts of a human readable name of the current call.
        full_name_list = []
        # Work out the module name
        module = inspect.getmodule(code)
        if module:
            module_name = module.__name__
            module_path = module.__file__
            # Ignore function calls from the standard library.
            if _is_module_stdlib(module_path):
                return self
            if module_name == '__main__':
                module_name = ''
        else:
            module_name = ''
        if module_name:
            full_name_list.append(module_name)
        # Work out the class name.
        try:
            class_name = frame.f_locals['self'].__class__.__name__
            full_name_list.append(class_name)
        except (KeyError, AttributeError):
            class_name = ''
        # Work out the current function or method
        func_name = code.co_name
        if func_name == '?':
            func_name = '__main__'
        full_name_list.append(func_name)
        # Create a readable representation of the current call
        full_name = '.'.join(full_name_list)
        # Ignore some internal method calls from the csp library.
        if full_name in ignore:
            return self
        # Create a list of all arguments passed to the function,
        # including defaults, with names as defined in outer scopes
        # (rather than the formal parameter list) and types.
        # func_args has pattern:
        # formal_parameter : (defined_name, type)
        func_args = {}
        # Gather formal parameter names, their values and types.
        for i in range(code.co_argcount):
            func_args[code.co_varnames[i]] = None
        for param in func_args:
            func_args[param] = _get_arguments(param, frame)
        if DEBUG: print ( _pprint_func(full_name, func_args) )
        callgraph.append(exstatic.icode.Call(frame.f_lineno, full_name, func_args, []))
        return self
    def trace_line(self, frame, event, arg):
        """Trace a line of source code.  Currently a no-op.
        """
        pass
    def trace_exn(self, frame, event, arg):
        """Trace an exception.  Currently a no-op.
        """
        pass
    def trace_return(self, frame, event, arg):
        """Trace a return value from a function.  Currently a no-op.
        """
        pass
| Python |
#!/bin/env python
"""
Digital signal processing for python-csp.
Copyright (C) Sarah Mount, 2009.
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from __future__ import absolute_import
from .csp import *
import math
# TODO: Use numpy for more sophisticated processes.
ACCEL_DUE_TO_GRAVITY = 9.80665
@forever
def Zip(outchan, inchannels):
    """Read one value from every channel in `inchannels` and write the
    collected values, as a single list, to `outchan`.  Repeats forever.
    """
    while True:
        values = [channel.read() for channel in inchannels]
        outchan.write(values)
        yield
@forever
def Unzip(inchan, outchans):
    """Continuously read sequences of data from a single input channel
    and send each datum out down its own output channel.

    Assumes len(outchans) >= len(each received datum) -- TODO confirm.
    """
    while True:
        data = inchan.read()
        # BUG FIX: the original iterated range(data), which raises
        # TypeError for any non-integer datum; iterate over the
        # indices of the received sequence instead.
        for i in range(len(data)):
            outchans[i].write(data[i])
        yield
@forever
def Sin(inchan, outchan):
    """Write the sine of each value read from `inchan` to `outchan`."""
    while True:
        value = inchan.read()
        outchan.write(math.sin(value))
        yield
@forever
def Cos(inchan, outchan):
    """Write the cosine of each value read from `inchan` to `outchan`."""
    while True:
        value = inchan.read()
        outchan.write(math.cos(value))
        yield
@forever
def Tan(inchan, outchan):
    """Write the tangent of each value read from `inchan` to `outchan`."""
    while True:
        value = inchan.read()
        outchan.write(math.tan(value))
        yield
@forever
def GenerateFloats(outchan):
    """Write the non-negative multiples of 0.1 (0.0, 0.1, 0.2, ...)
    to `outchan`, forever.
    """
    step = 0.1
    tick = 0
    while True:
        outchan.write(tick * step)
        tick += 1
        yield
@forever
def Magnitude(inchan, outchan):
    """Write the Euclidean magnitude of each vector (sequence of axis
    values) read from `inchan` to `outchan`.
    """
    while True:
        acceldata = inchan.read()
        squares = sum(axis ** 2 for axis in acceldata)
        outchan.write(math.sqrt(squares))
        yield
@forever
def Difference(inchan, outchan, window=1):
    """Write the difference between each value read from `inchan` and
    the previously read value (initially 0.0) to `outchan`.

    NOTE(review): `window` is accepted but unused; it is kept for
    backward compatibility with existing callers.
    """
    previous = 0.0
    while True:
        current = inchan.read()
        # The original wrapped this in a dead `except IndexError`
        # handler; numeric subtraction cannot raise IndexError, so the
        # handler has been removed.
        outchan.write(current - previous)
        previous = current
        yield
@forever
def Square(inchan, outchan):
    """Write the square of each value read from `inchan` to `outchan`."""
    while True:
        outchan.write(inchan.read() ** 2)
        yield
@forever
def Normalise(inchan, outchan, start=0.0, end=100.0):
    """Divide each value read from `inchan` by the width of the range
    [start, end] and write the result to `outchan`.

    NOTE(review): only the range width (end - start) is used; the
    `start` offset is never subtracted from the input -- confirm this
    is the intended normalisation.
    """
    scale = end - start
    while True:
        outchan.write(inchan.read() / scale)
        yield
@forever
def Threshold(thresh, inchan, outchan):
    """Forward values read from `inchan` to `outchan`, dropping any
    value below `thresh`.
    """
    while True:
        reading = inchan.read()
        if reading < thresh:
            yield
            continue
        outchan.write(reading)
        yield
| Python |
#!/usr/bin/env python
"""Communicating sequential processes, in Python.
When using CSP Python as a DSL, this module will normally be imported
via the statement 'from csp.csp import *' and should not be imported directly.
Copyright (C) Sarah Mount, 2008-10.
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
"""
__author__ = 'Sarah Mount <s.mount@wlv.ac.uk>'
__date__ = '2010-05-16'
#DEBUG = True
DEBUG = False
from functools import wraps # Easy decorators
import copy
import gc
import inspect
import logging
import os
import random
import sys
import tempfile
import time
import uuid
try:
import cPickle as pickle # Faster, only in Python 2.x
except ImportError:
import pickle
try: # Python optimisation compiler
import psyco
psyco.full()
except ImportError:
print ( 'No available optimisation' )
# Multiprocessing libary -- name changed between versions.
try:
# Version 2.6 and above
import multiprocessing as processing
if sys.platform == 'win32':
import multiprocessing.reduction
except ImportError:
raise ImportError('No library available for multiprocessing.\n'+
'csp.os_process is only compatible with Python 2. 6 and above.')
CSP_IMPLEMENTATION = 'os_process'
### Names exported by this module
__all__ = ['set_debug', 'CSPProcess', 'CSPServer', 'Alt',
'Par', 'Seq', 'Guard', 'Channel', 'FileChannel',
'process', 'forever', 'Skip', '_CSPTYPES', 'CSP_IMPLEMENTATION']
### Seeded random number generator (16 bytes)
_RANGEN = random.Random(os.urandom(16))
### CONSTANTS
_BUFFSIZE = 1024
_debug = logging.debug
class CorruptedData(Exception):
    """Raised to signal that channel data failed authentication and
    has therefore not come from an honest source.
    """

    def __str__(self):
        return 'Data sent with incorrect authentication key.'
class NoGuardInAlt(Exception):
    """Raised when a select is attempted on an Alt with no guards left."""

    def __str__(self):
        return 'Every Alt must have at least one guard.'
### Special constants / exceptions for termination and mobility
### Better not to use classes/objects here or pickle will get confused
### by the way that csp.__init__ manages the namespace.
_POISON = ';;;__POISON__;;;'
"""Used as special data sent down a channel to invoke termination."""
class ChannelPoison(Exception):
    """Raised to poison a process; propagated to all channels the
    process knows about so a whole process network can terminate.
    """

    def __str__(self):
        # BUG FIX: corrected the misspelling 'Posioned' in the message.
        return 'Poisoned channel exception.'
### DEBUGGING
def set_debug(status):
global DEBUG
DEBUG = status
logging.basicConfig(level=logging.NOTSET,
stream=sys.stdout)
logging.info("Using multiprocessing version of python-csp.")
### Fundamental CSP concepts -- Processes, Channels, Guards
class _CSPOpMixin(object):
    """Mixin class used for operator overloading in CSP process types.

    Provides start/join/terminate wrappers around
    multiprocessing.Process, channel-poison propagation
    (referent_visitor), and the CSP composition operators
    (`>` for Seq, `*` for sequential repetition).
    """
    def __init__(self):
        return
    def spawn(self):
        """Start only if self is not running (non-blocking)."""
        # NOTE(review): _popen is a private multiprocessing.Process
        # attribute (None until the process has been started).
        if not self._popen:
            processing.Process.start(self)
    def start(self):
        """Start only if self is not running, then block until it
        finishes (start followed by join)."""
        if not self._popen:
            try:
                processing.Process.start(self)
                processing.Process.join(self)
            except KeyboardInterrupt:
                sys.exit()
    def join(self):
        """Join only if self is running."""
        if self._popen:
            processing.Process.join(self)
    def referent_visitor(self, referents):
        """Recursively walk `referents`, poisoning every Channel found
        directly, inside containers, inside CSP processes' arguments,
        or in object attributes."""
        for obj in referents:
            if obj is self or obj is None:
                continue
            if isinstance(obj, Channel):
                obj.poison()
            elif ((hasattr(obj, '__getitem__') or hasattr(obj, '__iter__')) and
                  not isinstance(obj, str)):
                self.referent_visitor(obj)
            elif isinstance(obj, CSPProcess):
                self.referent_visitor(obj.args + tuple(obj.kwargs.values()))
            elif hasattr(obj, '__dict__'):
                self.referent_visitor(list(obj.__dict__.values()))
    def terminate(self):
        """Terminate only if self is running."""
        if self._popen is not None:
            processing.Process.terminate(self)
    def __gt__(self, other):
        """Implementation of CSP Seq: run self, then other."""
        assert _is_csp_type(other)
        seq = Seq(self, other)
        seq.start()
        return seq
    def __mul__(self, n):
        """Run `n` copies of this process in sequence (returns None)."""
        assert n > 0
        procs = [self]
        for i in range(n-1):
            procs.append(copy.copy(self))
        Seq(*procs).start()
    def __rmul__(self, n):
        """Right-hand variant of __mul__ (supports n * process)."""
        assert n > 0
        procs = [self]
        for i in range(n-1):
            procs.append(copy.copy(self))
        Seq(*procs).start()
class CSPProcess(processing.Process, _CSPOpMixin):
    """Implementation of CSP processes.
    There are two ways to create a new CSP process. Firstly, you can
    use the @process decorator to convert a function definition into a
    CSP Process. Once the function has been defined, calling it will
    return a new CSPProcess object which can be started manually, or
    used in an expression:

    >>> @process
    ... def foo(n):
    ...     print 'n:', n
    ...
    >>> foo(100).start()
    >>> n: 100
    >>> foo(10) // (foo(20),)
    n: 10
    n: 20
    <Par(Par-5, initial)>
    >>>

    Alternatively, you can create a CSPProcess object directly and
    pass a function (and its arguments) to the CSPProcess constructor:

    >>> def foo(n):
    ...     print 'n:', n
    ...
    >>> p = CSPProcess(foo, 100)
    >>> p.start()
    >>> n: 100
    >>>
    """
    def __init__(self, func, *args, **kwargs):
        # `func` must be a plain function: bound methods would drag a
        # whole object into the child process.
        processing.Process.__init__(self,
                                    target=func,
                                    args=(args),
                                    kwargs=kwargs)
        assert inspect.isfunction(func) # Check we aren't using objects
        assert not inspect.ismethod(func) # Check we aren't using objects
        _CSPOpMixin.__init__(self)
        # Record this process as the enclosing scope of any CSP object
        # passed in as an argument.
        for arg in list(self._args) + list(self._kwargs.values()):
            if _is_csp_type(arg):
                arg.enclosing = self
        self.enclosing = None
    def getName(self):
        """Return the process name assigned by multiprocessing."""
        return self._name
    def getPid(self):
        # NOTE(review): this returns _parent_pid (the pid of the
        # process that created this one), not this process's own pid
        # -- confirm that is the intended behaviour.
        return self._parent_pid
    def __floordiv__(self, proclist):
        """
        Run this process in parallel with a list of others.
        """
        par = Par(self, *list(proclist))
        par.start()
    def __str__(self):
        return 'CSPProcess running in PID {0}s'.format(self.getPid())
    def run(self): #, event=None):
        """Called automatically when the L{start} methods is called.

        Runs the target function; on ChannelPoison, propagates poison
        to every channel reachable from the process arguments.
        """
        try:
            self._target(*self._args, **self._kwargs)
        except ChannelPoison:
            _debug('{0}s got ChannelPoison exception in {1}'.format(str(self), self.getPid()))
            self.referent_visitor(self._args + tuple(self._kwargs.values()))
            # if self._popen is not None: self.terminate()
        except KeyboardInterrupt:
            sys.exit()
        except Exception:
            # Report any other failure through the standard hook.
            typ, excn, tback = sys.exc_info()
            sys.excepthook(typ, excn, tback)
    def __del__(self):
        """Run the garbage collector automatically on deletion of this
        object.

        This prevents the "Winder Bug" found in tests/winder_bug of
        the distribution, where successive process graphs are created
        in memory and, when the "outer" CSPProcess object returns from
        its .start() method the process graph is not garbage
        collected. This accretion of garbage can cause degenerate
        behaviour which is difficult to debug, such as a program
        pausing indefinitely on Channel creation.
        """
        # gc may already have been torn down at interpreter exit.
        if gc is not None:
            gc.collect()
class CSPServer(CSPProcess):
    """Implementation of CSP server processes.

    Not intended to be used in client code. Use @forever instead.
    A server process wraps a generator function; each advance of the
    generator performs one unit of service.
    """
    def __init__(self, func, *args, **kwargs):
        CSPProcess.__init__(self, func, *args, **kwargs)
    def __str__(self):
        return 'CSPServer running in PID {0}s'.format(self.getPid())
    def run(self): #, event=None):
        """Called automatically when the L{start} methods is called.

        Drives the wrapped generator forever.  When a tracer is
        installed (sys.gettrace() is not None) the generator is
        advanced only once more, so tracing does not hang on an
        infinite server loop.
        """
        try:
            generator = self._target(*self._args, **self._kwargs)
            # while/else: the loop body runs while no tracer is
            # installed; the else clause runs once the condition
            # becomes false, i.e. when a tracer has been detected.
            while sys.gettrace() is None:
                next(generator)
            else:
                # If the tracer is running execute the target only once.
                next(generator)
                logging.info('Server process detected a tracer running.')
            # Be explicit.
            return None
        except ChannelPoison:
            _debug('{0}s in {1} got ChannelPoison exception'.format(str(self), self.getPid()))
            self.referent_visitor(self._args + tuple(self._kwargs.values()))
            # if self._popen is not None: self.terminate()
        except KeyboardInterrupt:
            sys.exit()
        except Exception:
            # Report any other failure through the standard hook.
            typ, excn, tback = sys.exc_info()
            sys.excepthook(typ, excn, tback)
class Alt(_CSPOpMixin):
"""CSP select (OCCAM ALT) process.
python-csp process will often have access to several different
channels, or other guard types such as timer guards, and will have
to choose one of them to read from. For example, in a
producer/consumer or worker/farmer model, many producer or worker
processes will be writing values to channels and one consumer or
farmer process will be aggregating them in some way. It would be
inefficient for the consumer or farmer to read from each channel
in turn, as some channels might take longer than others. Instead,
python-csp provides support for ALTing (or ALTernating), which
enables a process to read from the first channel (or timer, or
other guard) in a list to become ready.
The simplest way to choose between channels (or other guards) is
to use choice operator: "|", as in the example below:
>>> @process
... def send_msg(chan, msg):
... chan.write(msg)
...
>>> @process
... def choice(chan1, chan2):
... # Choice chooses a channel on which to call read()
... print chan1 | chan2
... print chan1 | chan2
...
>>> c1, c2 = Channel(), Channel()
>>> choice(c1, c2) // (send_msg(c1, 'yes'), send_msg(c2, 'no'))
yes
no
<Par(Par-8, initial)>
>>>
Secondly, you can create an Alt object explicitly, and call its
select() method to perform a channel read on the next available
channel. If more than one channel is available to read from, then
an available channel is chosen at random (for this reason, ALTing
is sometimes called "non-deterministic choice":
>>> @process
... def send_msg(chan, msg):
... chan.write(msg)
...
>>> @process
... def alt_example(chan1, chan2):
... alt = Alt(chan1, chan2)
... print alt.select()
... print alt.select()
...
>>> c1, c2 = Channel(), Channel()
>>> Par(send_msg(c1, 'yes'), send_msg(c2, 'no'), alt_example(c1, c2)).start()
yes
no
>>>
In addition to the select() method, which chooses an available
guard at random, Alt provides two similar methods, fair_select()
and pri_select(). fair_select() will choose not to select the
previously selected guard, unless it is the only guard
available. This ensures that no guard will be starved twice in a
row. pri_select() will select available channels in the order in
which they were passed to the Alt() constructor, giving a simple
implementation of guard priority.
Lastly, Alt() can be used with the repetition operator (*) to
create a generator:
>>> @process
... def send_msg(chan, msg):
... chan.write(msg)
...
>>> @process
... def gen_example(chan1, chan2):
... gen = Alt(chan1, chan2) * 2
... print gen.next()
... print gen.next()
...
>>> c1, c2 = Channel(), Channel()
>>> Par(send_msg(c1, 'yes'), send_msg(c2, 'no'), gen_example(c1, c2)).start()
yes
no
>>>
"""
def __init__(self, *args):
super(Alt, self).__init__()
for arg in args:
assert isinstance(arg, Guard)
self.guards = list(args)
self.last_selected = None
    def poison(self):
        """Poison the last selected guard and unlink from the guard list.

        Sets self.last_selected to None.

        NOTE(review): assumes a select()/fair_select()/pri_select() has
        already succeeded -- if self.last_selected is None the disable()
        call below raises AttributeError. Confirm callers guarantee this.
        """
        _debug(str(type(self.last_selected)))
        self.last_selected.disable() # Just in case
        try:
            self.last_selected.poison()
        except Exception:
            # Best effort: not every guard type supports poisoning.
            pass
        _debug('Poisoned last selected.')
        # Unlink the poisoned guard so no future select can choose it.
        self.guards.remove(self.last_selected)
        _debug('{0} guards'.format(len(self.guards)))
        self.last_selected = None
    def _preselect(self):
        """Check for special cases when any form of select() is called.

        If no object can be returned from a channel read and no
        exception is raised then return None. Any select() method
        should work like a Channel.read() which must always return a
        value if it does not throw an exception.
        """
        if len(self.guards) == 0:
            # Selecting from an empty Alt can never succeed.
            raise NoGuardInAlt()
        elif len(self.guards) == 1:
            _debug('Alt Selecting unique guard: {0}s'.format(self.guards[0].name))
            self.last_selected = self.guards[0]
            # Busy-wait until the single guard becomes ready; enable()
            # is retried on each pass so a late writer is noticed.
            while not self.guards[0].is_selectable():
                self.guards[0].enable()
            return self.guards[0].select()
        # More than one guard: no special case applies.
        return None
def select(self):
"""Randomly select from ready guards."""
if len(self.guards) < 2:
return self._preselect()
ready = []
while len(ready) == 0:
for guard in self.guards:
guard.enable()
_debug('Alt enabled all guards')
time.sleep(0.01) # Not sure about this.
ready = [guard for guard in self.guards if guard.is_selectable()]
_debug('Alt got {0} items to choose from out of {1}'.format(len(ready), len(self.guards)))
selected = _RANGEN.choice(ready)
self.last_selected = selected
for guard in self.guards:
if guard is not selected:
guard.disable()
return selected.select()
    def fair_select(self):
        """Select a guard to synchronise with. Do not select the
        previously selected guard (unless it is the only guard
        available).
        """
        if len(self.guards) < 2:
            return self._preselect()
        ready = []
        # Busy-wait: keep enabling all guards until at least one is ready.
        while len(ready) == 0:
            for guard in self.guards:
                guard.enable()
            _debug('Alt enabled all guards')
            # NOTE(review): this sleep is 0.1s where select() and
            # pri_select() use 0.01s -- confirm the difference is
            # deliberate.
            time.sleep(0.1) # Not sure about this.
            ready = [guard for guard in self.guards if guard.is_selectable()]
        _debug('Alt got {0} items to choose from, out of {1}'.format(len(ready), len(self.guards)))
        selected = None
        # Fairness: skip the guard chosen last time, unless it is the
        # only guard that is ready.
        if self.last_selected in ready and len(ready) > 1:
            ready.remove(self.last_selected)
            _debug('Alt removed last selected from ready list')
        selected = _RANGEN.choice(ready)
        self.last_selected = selected
        # Roll back the guards that were enabled but not chosen.
        for guard in self.guards:
            if guard is not selected:
                guard.disable()
        return selected.select()
def pri_select(self):
"""Select a guard to synchronise with, in order of
"priority". The guard with the lowest index in the L{guards}
list has the highest priority.
"""
if len(self.guards) < 2:
return self._preselect()
ready = []
while len(ready) == 0:
for guard in self.guards:
guard.enable()
_debug('Alt enabled all guards')
time.sleep(0.01) # Not sure about this.
ready = [guard for guard in self.guards if guard.is_selectable()]
_debug('Alt got {0} items to choose from, out of {1}'.format(len(ready), len(self.guards)))
self.last_selected = ready[0]
for guard in ready[1:]:
guard.disable()
return ready[0].select()
def __mul__(self, n):
assert n > 0
for i in range(n):
yield self.select()
def __rmul__(self, n):
assert n > 0
for i in range(n):
yield self.select()
class Par(processing.Process, _CSPOpMixin):
    """Run CSP processes in parallel.

    There are two ways to run processes in parallel.  Firstly, given
    two (or more) processes you can parallelize them with the //
    operator, like this:

    >>> @process
    ... def foo(n):
    ...     print 'n:', n
    ...
    >>> foo(1) // (foo(2), foo(3))
    n: 2
    n: 1
    n: 3
    <Par(Par-5, initial)>
    >>>

    Notice that the // operator takes a CSPProcess on the left hand side
    and a sequence of processes on the right hand side.

    Alternatively, you can create a Par object which is a sort of CSP
    process and start that process manually:

    >>> p = Par(foo(100), foo(200), foo(300))
    >>> p.start()
    n: 100
    n: 300
    n: 200
    >>>
    """

    def __init__(self, *procs, **kwargs):
        """Collect C{procs} to be run in parallel, flattening any
        directly nested Par objects into this one.
        """
        super(Par, self).__init__(None)
        self.procs = []
        for proc in procs:
            # FIXME: only catches shallow nesting.
            if isinstance(proc, Par):
                self.procs += proc.procs
            else:
                self.procs.append(proc)
        for proc in self.procs:
            proc.enclosing = self
        _debug('{0} processes in Par:'.format(len(self.procs)))

    def __ifloordiv__(self, proclist):
        """
        Run this Par in parallel with a list of others.

        Returns self, as required of in-place operators: previously
        this returned None, so ``p //= procs`` rebound ``p`` to None.
        """
        assert hasattr(proclist, '__iter__')
        # NOTE(review): this *replaces* the current process list rather
        # than extending it -- confirm that is the intended semantics.
        self.procs = []
        for proc in proclist:
            # FIXME: only catches shallow nesting.
            if isinstance(proc, Par):
                self.procs += proc.procs
            else:
                self.procs.append(proc)
        for proc in self.procs:
            proc.enclosing = self
        _debug('{0} processes added to Par by //:'.format(len(self.procs)))
        self.start()
        return self

    def __str__(self):
        return 'CSP Par running in process {0}'.format(self.getPid())

    def terminate(self):
        """Terminate the execution of this process.

        Terminates every child process first, then the Par's own
        underlying OS process (if it was ever started).
        """
        for proc in self.procs:
            proc.terminate()
        if self._popen:
            # BUG FIX: this used to call self.terminate(), which
            # recursed forever.  Delegate to the Process base class.
            processing.Process.terminate(self)

    def join(self):
        """Wait for every spawned child process to finish."""
        for proc in self.procs:
            if proc._popen:
                proc.join()

    def start(self):
        """Start then synchronize with the execution of parallel processes.
        Return when all parallel processes have returned.
        """
        try:
            for proc in self.procs:
                proc.spawn()
            for proc in self.procs:
                proc.join()
        except ChannelPoison:
            # A poisoned channel terminates the whole Par; propagate
            # the poison to every referenced channel.
            _debug('{0}s got ChannelPoison exception in {1}'.format(str(self), self.getPid()))
            self.referent_visitor(self._args + tuple(self._kwargs.values()))
            # if self._popen is not None: self.terminate()
        except KeyboardInterrupt:
            sys.exit()
        except Exception:
            typ, excn, tback = sys.exc_info()
            sys.excepthook(typ, excn, tback)

    def __len__(self):
        return len(self.procs)

    def __getitem__(self, index):
        """Can raise an IndexError if index is not a valid index of
        self.procs.
        """
        return self.procs[index]

    def __setitem__(self, index, value):
        assert isinstance(value, CSPProcess)
        self.procs[index] = value

    def __contains__(self, proc):
        return proc in self.procs
class Seq(processing.Process, _CSPOpMixin):
    """Run CSP processes sequentially.

    There are two ways to run processes in sequence.  Firstly, given
    two (or more) processes you can sequence them with the > operator,
    like this:

    >>> @process
    ... def foo(n):
    ...     print 'n:', n
    ...
    >>> foo(1) > foo(2) > foo(3)
    n: 1
    n: 2
    n: 3
    <Seq(Seq-14, initial)>
    >>>

    Secondly, you can create a Seq object which is a sort of CSP
    process and start that process manually:

    >>> s = Seq(foo(100), foo(200), foo(300))
    >>> s.start()
    n: 100
    n: 200
    n: 300
    >>>
    """
    def __init__(self, *procs):
        """Collect C{procs} to be run in sequence, flattening any
        directly nested Seq objects into this one.
        """
        super(Seq, self).__init__()
        self.procs = []
        for proc in procs:
            # FIXME: only catches shallow nesting.
            if isinstance(proc, Seq):
                self.procs += proc.procs
            else:
                self.procs.append(proc)
        for proc in self.procs:
            # Each child records the Seq that controls it.
            proc.enclosing = self
    def __str__(self):
        return 'CSP Seq running in process {0}.'.format(self.getPid())
    def start(self):
        """Start this process running.

        Each child is started and joined in turn, so the next child
        only begins after the previous one has returned.
        """
        try:
            for proc in self.procs:
                _CSPOpMixin.start(proc)
                proc.join()
        except ChannelPoison:
            # A poisoned channel terminates the whole Seq; propagate
            # the poison to every referenced channel.
            _debug('{0} got ChannelPoison exception in {1}'.format(str(self), self.getPid()))
            self.referent_visitor(self._args + tuple(self._kwargs.values()))
            if self._popen is not None: self.terminate()
        except KeyboardInterrupt:
            sys.exit()
        except Exception:
            typ, excn, tback = sys.exc_info()
            sys.excepthook(typ, excn, tback)
### Guards and channels
class Guard(object):
    """Abstract base class for CSP guard types.

    Concrete guards must override is_selectable, enable, disable and
    select; poison is optional (the default does nothing).
    """
    def is_selectable(self):
        """Return C{True} if this guard can be selected by an L{Alt}."""
        raise NotImplementedError('Must be implemented in subclass')
    def enable(self):
        """Prepare for, but do not commit to, a synchronisation."""
        raise NotImplementedError('Must be implemented in subclass')
    def disable(self):
        """Roll back from an L{enable} call."""
        raise NotImplementedError('Must be implemented in subclass')
    def select(self):
        """Commit to a synchronisation started by L{enable}."""
        raise NotImplementedError('Must be implemented in subclass')
    def poison(self):
        """Terminate all processes attached to this guard.

        No-op by default; subclasses override where meaningful.
        """
        pass
    def __str__(self):
        return 'CSP Guard: must be subclassed.'
    def __or__(self, other):
        # "guard1 | guard2" performs a two-way Alt selection.
        assert isinstance(other, Guard)
        return Alt(self, other).select()
    def __ror__(self, other):
        assert isinstance(other, Guard)
        return Alt(self, other).select()
class Channel(Guard):
    """CSP Channel objects.

    In python-csp there are two sorts of channel. In JCSP terms these
    are Any2Any, Alting channels. However, each channel creates an
    operating system level pipe. Since this is a file object the
    number of channels a program can create is limited to the maximum
    number of files the operating system allows to be open at any one
    time. To avoid this bottleneck use L{FileChannel} objects, which
    close the file descriptor used for IPC after every read or write
    operations. Read and write operations are, however, over 20 times
    slower when performed on L{FileChannel} objects.

    Subclasses of C{Channel} must call L{_setup()} in their
    constructor and override L{put}, L{get}, L{__del__}.

    A CSP channel can be created with the Channel class:

    >>> c = Channel()
    >>>

    Each Channel object should have a unique name in the network:

    >>> print c.name
    1ca98e40-5558-11df-8e5b-002421449824
    >>>

    The Channel can then be passed as an argument to any CSP process
    and then be used either to read (using the .read() method) or to
    write (using the .write() method). For example:

    >>> @process
    ... def send(cout, data):
    ...     cout.write(data)
    ...
    >>> @process
    ... def recv(cin):
    ...     print 'Got:', cin.read()
    ...
    >>> c = Channel()
    >>> send(c, 100) // (recv(c),)
    Got: 100
    <Par(Par-3, initial)>
    >>>
    """
    # Small integers used inside processing.Value('h', ...) cells,
    # which cannot hold Python booleans directly.
    TRUE = 1
    FALSE = 0
    def __init__(self):
        self.name = uuid.uuid1()
        self._wlock = None	# Write lock protects from races between writers.
        self._rlock = None	# Read lock protects from races between readers.
        self._plock = None      # Guards access to the poison flag.
        self._available = None     # Released if writer has made data available.
        self._taken = None         # Released if reader has taken data.
        self._is_alting = None     # True if engaged in an Alt synchronisation.
        self._is_selectable = None # True if can be selected by an Alt.
        self._has_selected = None  # True if already been committed to select.
        # OS pipe used as the actual data conduit between processes.
        self._itemr, self._itemw = os.pipe()
        self._poisoned = None
        self._setup()
        super(Channel, self).__init__()
        _debug('Channel created: {0}'.format(self.name))
    def _setup(self):
        """Set up synchronisation.

        MUST be called in __init__ of this class and all subclasses.
        """
        # Process-safe synchronisation.
        self._wlock = processing.RLock()	# Write lock.
        self._rlock = processing.RLock()	# Read lock.
        self._plock = processing.Lock()  # Fix poisoning.
        self._available = processing.Semaphore(0)
        self._taken = processing.Semaphore(0)
        # Process-safe synchronisation for CSP Select / Occam Alt.
        self._is_alting = processing.Value('h', Channel.FALSE,
                                           lock=processing.Lock())
        self._is_selectable = processing.Value('h', Channel.FALSE,
                                               lock=processing.Lock())
        # Kludge to say a select has finished (to prevent the channel
        # from being re-enabled). If values were really process safe
        # we could just have writers set _is_selectable and read that.
        self._has_selected = processing.Value('h', Channel.FALSE,
                                              lock=processing.Lock())
        # Is this channel poisoned?
        self._poisoned = processing.Value('h', Channel.FALSE,
                                          lock=processing.Lock())
    def put(self, item):
        """Put C{item} on a process-safe store.

        The item is pickled and written to the underlying OS pipe.
        """
        self.checkpoison()
        os.write(self._itemw, pickle.dumps(item, protocol=1))
    def get(self):
        """Get a Python object from a process-safe store.

        Reads pickled data from the OS pipe in _BUFFSIZE chunks and
        unpickles the concatenation.
        """
        self.checkpoison()
        data = []
        while True:
            sval = os.read(self._itemr, _BUFFSIZE)
            _debug('Read from OS pipe')
            if not sval:
                break
            data.append(sval)
            # _debug('Pipe got data: {0}, {1}'.format(len(sval), sval))
            # A short read means the pipe has been drained.
            if len(sval) < _BUFFSIZE:
                break
        _debug('Left read loop')
        _debug('About to unmarshall this data: {0}'.format(b''.join(data)))
        obj = None if data == [] else pickle.loads(b''.join(data))
        _debug('pickle library has unmarshalled data.')
        return obj
    def __del__(self):
        # The descriptors may already be closed (e.g. after poisoning
        # or interpreter shutdown), so failures are ignored.
        try:
            os.close(self._itemr)
            os.close(self._itemw)
        except:
            pass
    def is_selectable(self):
        """Test whether Alt can select this channel.
        """
        _debug('Alt THINKS _is_selectable IS: {0}'.format(str(self._is_selectable.value == Channel.TRUE)))
        self.checkpoison()
        return self._is_selectable.value == Channel.TRUE
    def write(self, obj):
        """Write a Python object to this channel.

        Blocks until a reader has taken the object.
        """
        self.checkpoison()
        _debug('+++ Write on Channel {0} started.'.format(self.name))
        with self._wlock: # Protect from races between multiple writers.
            # If this channel has already been selected by an Alt then
            # _has_selected will be True, blocking other readers. If a
            # new write is performed that flag needs to be reset for
            # the new write transaction.
            self._has_selected.value = Channel.FALSE
            # Make the object available to the reader.
            self.put(obj)
            # Announce the object has been released to the reader.
            self._available.release()
            _debug('++++ Writer on Channel {0}: _available: {1} _taken: {2}. '.format(self.name, repr(self._available), repr(self._taken)))
            # Block until the object has been read.
            self._taken.acquire()
            # Remove the object from the channel.
        _debug('+++ Write on Channel {0} finished.'.format(self.name))
    def read(self):
        """Read (and return) a Python object from this channel.

        Blocks until a writer has made an object available.
        """
        # assert self._is_alting.value == Channel.FALSE
        # assert self._is_selectable.value == Channel.FALSE
        self.checkpoison()
        _debug('+++ Read on Channel {0} started.'.format(self.name))
        with self._rlock: # Protect from races between multiple readers.
            # Block until an item is in the Channel.
            _debug('++++ Reader on Channel {0}: _available: {1} _taken: {2}. '.format(self.name, repr(self._available), repr(self._taken)))
            self._available.acquire()
            # Get the item.
            obj = self.get()
            # Announce the item has been read.
            self._taken.release()
        _debug('+++ Read on Channel {0} finished.'.format(self.name))
        return obj
    def enable(self):
        """Enable a read for an Alt select.

        MUST be called before L{select()} or L{is_selectable()}.
        """
        self.checkpoison()
        # Prevent re-synchronization.
        if (self._has_selected.value == Channel.TRUE or
            self._is_selectable.value == Channel.TRUE):
            # Be explicit.
            return None
        self._is_alting.value = Channel.TRUE
        with self._rlock:
            # Attempt to acquire _available.
            time.sleep(0.00001) # Won't work without this -- why?
            if self._available.acquire(block=False):
                # A writer is waiting: this channel can be selected.
                self._is_selectable.value = Channel.TRUE
            else:
                self._is_selectable.value = Channel.FALSE
        _debug('Enable on guard {0} _is_selectable: {1} _available: {2}'.format(self.name, str(self._is_selectable.value), repr(self._available)))
    def disable(self):
        """Disable this channel for Alt selection.

        MUST be called after L{enable} if this channel is not selected.
        """
        self.checkpoison()
        self._is_alting.value = Channel.FALSE
        if self._is_selectable.value == Channel.TRUE:
            with self._rlock:
                # Return the token acquired speculatively by enable().
                self._available.release()
            self._is_selectable.value = Channel.FALSE
    def select(self):
        """Complete a Channel read for an Alt select.
        """
        self.checkpoison()
        _debug('channel select starting')
        assert self._is_selectable.value == Channel.TRUE
        with self._rlock:
            _debug('got read lock on channel {0} _available: {1}'.format(self.name, repr(self._available)))
            # Obtain object on Channel.
            obj = self.get()
            _debug('got obj')
            # Notify write() that object is taken.
            self._taken.release()
            _debug('released _taken')
            # Reset flags to ensure a future read / enable / select.
            self._is_selectable.value = Channel.FALSE
            self._is_alting.value = Channel.FALSE
            self._has_selected.value = Channel.TRUE
            _debug('reset bools')
        if obj == _POISON:
            self.poison()
            raise ChannelPoison()
        return obj
    def __str__(self):
        return 'Channel using OS pipe for IPC.'
    def checkpoison(self):
        """Raise ChannelPoison if this channel has been poisoned."""
        with self._plock:
            if self._poisoned.value == Channel.TRUE:
                _debug('{0} is poisoned. Raising ChannelPoison()'.format(self.name))
                raise ChannelPoison()
    def poison(self):
        """Poison a channel causing all processes using it to terminate.

        A set of communicating processes can be terminated by
        "poisoning" any of the channels used by those processes. This
        can be achieved by calling the poison() method on any
        channel. For example:

        >>> @process
        ... def send5(cout):
        ...     for i in xrange(5):
        ...         print 'send5 sending:', i
        ...         cout.write(i)
        ...         time.sleep(random.random() * 5)
        ...     return
        ...
        >>> @process
        ... def recv(cin):
        ...     for i in xrange(5):
        ...         data = cin.read()
        ...         print 'recv got:', data
        ...         time.sleep(random.random() * 5)
        ...     return
        ...
        >>> @process
        ... def interrupt(chan):
        ...     time.sleep(random.random() * 7)
        ...     print 'Poisoning channel:', chan.name
        ...     chan.poison()
        ...     return
        ...
        >>> doomed = Channel()
        >>> send5(doomed) // (recv(doomed), interrupt(doomed))
        send5 sending: 0
        recv got: 0
        send5 sending: 1
        recv got: 1
        send5 sending: 2
        recv got: 2
        send5 sending: 3
        recv got: 3
        send5 sending: 4
        recv got: 4
        Poisoning channel: 5c906e38-5559-11df-8503-002421449824
        <Par(Par-5, initial)>
        >>>
        """
        with self._plock:
            self._poisoned.value = Channel.TRUE
            # Avoid race conditions on any waiting readers / writers.
            self._available.release()
            self._taken.release()
class FileChannel(Channel):
    """Channel objects using files on disk.

    C{FileChannel} objects close their files after each read or write
    operation. The advantage of this is that client code can create as
    many C{FileChannel} objects as it wishes (unconstrained by the
    operating system's maximum number of open files). In return there
    is a performance hit -- reads and writes are around 10 x slower on
    C{FileChannel} objects compared to L{Channel} objects.
    """
    def __init__(self):
        self.name = uuid.uuid1()
        self._wlock = None	# Write lock.
        self._rlock = None	# Read lock.
        self._available = None
        self._taken = None
        self._is_alting = None
        self._is_selectable = None
        self._has_selected = None
        # Process-safe store: a temporary file holding one pickled item.
        file_d, self._fname = tempfile.mkstemp()
        os.close(file_d)
        self._setup()
    def put(self, item):
        """Put C{item} on a process-safe store.

        BUG FIX: use the portable builtin open() instead of the
        Python-2-only file(), and open in binary mode -- pickled data
        is binary, and text mode corrupts it on platforms that
        translate line endings.
        """
        file_d = open(self._fname, 'wb')
        file_d.write(pickle.dumps(item, protocol=1))
        file_d.flush()
        file_d.close()
    def get(self):
        """Get a Python object from a process-safe store.

        Spins until the writer has actually written data to the file.
        """
        stored = b''
        while stored == b'':
            file_d = open(self._fname, 'rb')
            stored = file_d.read()
            file_d.close()
        # Unlinking here ensures that FileChannel objects exhibit the
        # same semantics as Channel objects.
        os.unlink(self._fname)
        obj = pickle.loads(stored)
        return obj
    def __del__(self):
        try:
            # Necessary if the Channel has been deleted by poisoning.
            os.unlink(self._fname)
        except:
            pass
    def __str__(self):
        return 'Channel using files for IPC.'
### Function decorators
def process(func):
    """Decorator to turn a function into a CSP process.

    There are two ways to create a new CSP process. Firstly, you can
    use the @process decorator to convert a function definition into a
    CSP Process. Once the function has been defined, calling it will
    return a new CSPProcess object which can be started manually, or
    used in an expression:

    >>> @process
    ... def foo(n):
    ...     print 'n:', n
    ...
    >>> foo(100).start()
    >>> n: 100
    >>> foo(10) // (foo(20),)
    n: 10
    n: 20
    <Par(Par-5, initial)>
    >>>

    Alternatively, you can create a CSPProcess object directly and pass a
    function (and its arguments) to the CSPProcess constructor:

    >>> def foo(n):
    ...     print 'n:', n
    ...
    >>> p = CSPProcess(foo, 100)
    >>> p.start()
    >>> n: 100
    >>>
    """
    @wraps(func)
    def _call(*args, **kwargs):
        """Wrap the target callable in a fresh CSPProcess."""
        return CSPProcess(func, *args, **kwargs)
    return _call
def forever(func):
    """Decorator to turn a generator function into a CSP server process.

    It is preferable to use this rather than @process, to enable the
    CSP tracer to terminate correctly and produce a CSP model, or
    other debugging information.

    A server process is one which runs in an infinite loop. You can
    create a "normal" process which runs in an infinite loop, but by
    using server processes you allow the python-csp debugger to
    correctly generate information about your programs.

    There are two ways to create a new CSP server process. Firstly,
    you can use the @forever decorator to convert a generator into a
    CSPServer object. Once the function has been defined, calling it
    will return a new CSPServer object which can be started manually,
    or used in an expression:

    >>> @forever
    ... def integers():
    ...     n = 0
    ...     while True:
    ...         print n
    ...         n += 1
    ...         yield
    ...
    >>> integers().start()
    >>> 0
    1
    2
    3
    4
    5
    ...
    KeyboardInterrupt
    >>>

    Alternatively, you can create a CSPServer object directly and pass a
    function (and its arguments) to the CSPServer constructor:

    >>> def integers():
    ...     n = 0
    ...     while True:
    ...         print n
    ...         n += 1
    ...         yield
    ...
    >>> i = CSPServer(integers)
    >>> i.start()
    >>> 0
    1
    2
    3
    4
    5
    ...
    KeyboardInterrupt
    """
    @wraps(func)
    def _call(*args, **kwargs):
        """Wrap the target generator function in a fresh CSPServer."""
        return CSPServer(func, *args, **kwargs)
    return _call
### List of CSP based types (class names). Used by _is_csp_type to
### recognise any object participating in the CSP process algebra.
_CSPTYPES = [CSPProcess, Par, Seq, Alt]
def _is_csp_type(obj):
    """Return True if obj is any type of CSP process."""
    for csp_type in _CSPTYPES:
        if isinstance(obj, csp_type):
            return True
    return False
def _nop():
    """Do nothing (used as the target callable of the Skip process)."""
    pass
class Skip(CSPProcess, Guard):
    """Guard which will always return C{True}. Useful in L{Alt}s where
    the programmer wants to ensure that L{Alt.select} will always
    synchronise with at least one guard.

    Skip is a built in guard type that can be used with Alt
    objects. Skip() is a default guard which is always ready and has
    no effect. This is useful where you have a loop which calls
    select(), pri_select() or fair_select() on an Alt object
    repeatedly and you do not wish the select statement to block
    waiting for a channel write, or other synchronisation. The
    following is a trivial example of an Alt which uses Skip():

    >>> alt = Alt(Skip())
    >>> for i in xrange(5):
    ...     print alt.select()
    ...
    Skip
    Skip
    Skip
    Skip
    Skip
    >>>

    Where you have an Alt() object which mixes Skip() with other guard
    types, be sure to complete all necessary channel reads or other
    synchronisations, otherwise your code will hang.
    """
    def __init__(self):
        # Both base classes are initialised explicitly: Guard first,
        # then CSPProcess with the no-op target.
        Guard.__init__(self)
        CSPProcess.__init__(self, _nop)
        self.name = '__Skip__'
    def is_selectable(self):
        """Skip is always selectable."""
        return True
    def enable(self):
        """Has no effect."""
        pass
    def disable(self):
        """Has no effect."""
        pass
    def select(self):
        """Has no effect."""
        return 'Skip'
    def __str__(self):
        return 'Skip guard is always selectable / process does nothing.'
| Python |
#!/usr/bin/env python
"""
If you want to use the python-csp library then this is the file to
import. It attempts to match the best possible implementation of CSP
for your platform.
If you wish to choose to use the multiprocess (multicore) or threaded
version of the libraries explicitly then set an environment variable
in your operating system called "CSP". This should be either set to
"PROCESSES" or "THREADS" depending on what you want to use.
Copyright (C) Sarah Mount, 2010.
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
"""
from __future__ import absolute_import
from contextlib import contextmanager
import os
import sys
### Names exported by this module
__all__ = ['set_debug', 'CSPProcess', 'CSPServer', 'Alt',
'Par', 'Seq', 'Guard', 'Channel', 'FileChannel',
'process', 'forever', 'Skip', 'CSP_IMPLEMENTATION']
__author__ = 'Sarah Mount <s.mount@wlv.ac.uk>'
__date__ = 'July 2010'
# FIXME: Simplify this logic. See the thread on "Importing different
# implementations of the library" on the mailing list.

# If multiprocessing is not available then import threads.
major, minor = sys.version_info[:2]
if (major, minor) < (2, 6):
    try:
        from .os_thread import *
    except ImportError:
        # BUG FIX: was a bare "except:", which also swallowed
        # KeyboardInterrupt / SystemExit and masked unrelated errors
        # inside os_thread itself.
        from .os_process import *
# If multiprocessing is likely to be available then let the user
# choose which version of the implementation they wish to use.
elif 'CSP' in os.environ:
    if os.environ['CSP'].upper() == 'THREADS':
        from .os_thread import *
    else:
        from .os_process import *
# If no useful information is available then try to import the
# multiprocessing version of the code else catch the resulting
# exception and use the threaded version.
else:
    try:
        from .os_process import *
    except ImportError:
        from .os_thread import *
class CSP(object):
    """Context manager to execute Python functions sequentially or in
    parallel, similarly to OCCAM syntax:

    csp = CSP()
    with csp.seq():
        csp.process(myfunc1, arg1, arg2)
        with csp.par():
            csp.process(myfunc2, arg1, arg2)
            csp.process(myfunc3, arg1, arg2)
    csp.start()
    # myfunc2 and myfunc3 will be executed in parallel.
    # myfunc1 will be executed before them, since the
    # enclosing block is sequential.

    Note: par and seq are @contextmanager methods, so they must be
    *called* in the with statement (``with csp.par():``).
    """
    def __init__(self):
        # Stack of process lists; context managers push/pop lists and
        # fold them into Par / Seq objects.
        self.processes = []
    @contextmanager
    def par(self):
        """Context manager to execute functions in parallel.

        csp = CSP()
        with csp.par():
            csp.process(myfunc1, arg1, arg2)
            csp.process(myfunc2, arg1, arg2)
        csp.start()
        # myfunc1 and myfunc2 will be executed in parallel.
        """
        self.processes.append([])
        yield
        proc_list = self.processes.pop()
        par = Par(*proc_list)
        if len(self.processes) > 0:
            # Nested context: attach to the enclosing block.
            self.processes[-1].append(par)
        else:
            self.processes.append(par)
        return
    @contextmanager
    def seq(self):
        """Context manager to execute functions in sequence.

        csp = CSP()
        with csp.seq():
            csp.process(myfunc1, arg1, arg2)
            csp.process(myfunc2, arg1, arg2)
        csp.start()
        # myfunc1 and myfunc2 will be executed sequentially.
        """
        self.processes.append([])
        yield
        proc_list = self.processes.pop()
        seq = Seq(*proc_list)
        if len(self.processes) > 0:
            # Nested context: attach to the enclosing block.
            self.processes[-1].append(seq)
        else:
            self.processes.append(seq)
        return
    def process(self, func, *args, **kwargs):
        """Add a process to the current list of proceses.

        Likely, this will be called from inside a context manager, e.g.:

        csp = CSP()
        with csp.par():
            csp.process(myfunc1, arg1, arg2)
            csp.process(myfunc2, arg1, arg2)
        csp.start()

        NOTE(review): calling this outside any par()/seq() context
        raises IndexError because self.processes is empty -- confirm
        whether that should be guarded.
        """
        self.processes[-1].append(CSPProcess(func, *args, **kwargs))
        return
    def start(self):
        """Start all processes in self.processes (in parallel) and run
        to completion.
        """
        if len(self.processes) == 0:
            return
        elif len(self.processes) == 1:
            self.processes[0].start()
        else:
            Par(*self.processes).start()
        return
| Python |
#!/usr/bin/env python
"""Builtin processes for python-csp. For guard types see csp.guards.
Based on the JCSP PlugNPlay package.
Copyright (C) Sarah Mount, 2009.
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
"""
from __future__ import absolute_import
import math
import operator
import os
import sys
from .guards import Timer
from .csp import *
__author__ = 'Sarah Mount <s.mount@wlv.ac.uk>'
__date__ = 'May 2010'
# Names exported by this module.
__all__ = ['Sin', 'Cos', 'GenerateFloats',
'Zeroes', 'Id', 'Succ', 'Pred', 'Prefix', 'Delta2', 'Splitter',
'Mux2', 'Multiply', 'Clock', 'Printer', 'Pairs',
'Mult', 'Generate', 'FixedDelay', 'Fibonacci',
'Blackhole', 'Sign',
# Processes based on Python operators
'Plus', 'Sub', 'Mul', 'Div', 'FloorDiv', 'Mod',
'Pow', 'LShift', 'RShift', 'Neg', 'Not', 'And',
'Or', 'Nand', 'Nor', 'Xor', 'Land', 'Lor', 'Lnot',
'Lnand', 'Lnor', 'Lxor', 'Eq', 'Ne', 'Geq', 'Leq',
'Gt', 'Lt', 'Is', 'Is_Not']
@forever
def GenerateFloats(outchan, increment=0.1):
    """Write out the non-negative multiples of C{increment} in order.

    readset =
    writeset = outchan
    """
    tick = 0
    while True:
        outchan.write(tick * increment)
        tick += 1
        yield
@forever
def Zeroes(cout):
    """Writes out an endless stream of zeros.

    readset =
    writeset = cout
    """
    while True:
        cout.write(0)
        yield
@forever
def Id(cin, cout):
    """Copy each value read from L{cin} straight to L{cout};
    the CSP equivalent of lambda x: x.

    readset = cin
    writeset = cout
    """
    while True:
        value = cin.read()
        cout.write(value)
        yield
@forever
def Succ(cin, cout):
    """Succ is the successor process: each value read from L{cin} is
    written to L{cout} incremented by one.

    readset = cin
    writeset = cout
    """
    while True:
        value = cin.read()
        cout.write(value + 1)
        yield
@forever
def Pred(cin, cout):
    """Pred is the predecessor process, which writes out its input
    event - 1.

    readset = cin
    writeset = cout
    """
    while True:
        cout.write(cin.read() - 1)
        yield
@forever
def Prefix(cin, cout, prefix_item=None):
    """Prefix a write on L{cout} with the value read from L{cin}.

    readset = cin
    writeset = cout

    @type prefix_item: object
    @param prefix_item: prefix value to use before first item read from L{cin}.
    """
    pending = prefix_item
    while True:
        cout.write(pending)
        pending = cin.read()
        yield
@forever
def Splitter(cin, cout1, cout2):
    """Splitter copies each input value to both output channels.

    readset = cin
    writeset = cout1, cout2
    """
    while True:
        value = cin.read()
        cout1.write(value)
        cout2.write(value)
        yield
Delta2 = Splitter
@forever
def Mux2(cin1, cin2, cout):
    """Mux2 provides a fair multiplex between two input channels,
    strictly alternating reads from cin1 and cin2.

    readset = cin1, cin2
    writeset = cout
    """
    while True:
        for source in (cin1, cin2):
            cout.write(source.read())
        yield
@forever
def Clock(cout, resolution=1):
    """Send None object down output channel every C{resolution}
    seconds.

    readset =
    writeset = cout
    """
    ticker = Timer()
    while True:
        ticker.sleep(resolution)
        cout.write(None)
        yield
@forever
def Printer(cin, out=sys.stdout):
    """Print all values read from L{cin} to standard out or L{out}.

    readset = cin
    writeset =
    """
    while True:
        text = str(cin.read())
        out.write(text + '\n')
        yield
@forever
def Mult(cin, cout, scale):
    """Scale values read on L{cin} and write to L{cout}.

    readset = cin
    writeset = cout
    """
    while True:
        value = cin.read()
        cout.write(value * scale)
        yield
@forever
def Generate(cout):
    """Generate successive (+ve) ints and write to L{cout}.

    readset =
    writeset = cout
    """
    tally = 0
    while True:
        cout.write(tally)
        tally += 1
        yield
@forever
def FixedDelay(cin, cout, delay):
    """Read values from L{cin} and write to L{cout} after a delay of
    L{delay} seconds.

    readset = cin
    writeset = cout
    """
    ticker = Timer()
    while True:
        value = cin.read()
        ticker.sleep(delay)
        cout.write(value)
        yield
@forever
def Fibonacci(cout):
    """Write successive Fibonacci numbers to L{cout}.

    readset =
    writeset = cout
    """
    current, following = 1, 1
    while True:
        cout.write(current)
        current, following = following, current + following
        yield
@forever
def Blackhole(cin):
    """Read values from L{cin} and do nothing with them.

    readset = cin
    writeset =
    """
    while True:
        cin.read()  # Value intentionally discarded.
        yield
@forever
def Sign(cin, cout, prefix):
    """Read values from L{cin} and write to L{cout}, prefixed by L{prefix}.

    readset = cin
    writeset = cout
    """
    while True:
        value = cin.read()
        cout.write(prefix + str(value))
        yield
### Magic for processes built on Python operators
def _applyunop(unaryop, docstring):
    """Create a process whose output is C{unaryop(cin.read())}.
    """
    chandoc = """
    readset = cin
    writeset = cout
    """
    @forever
    def _unproc(cin, cout):
        while True:
            cout.write(unaryop(cin.read()))
            yield
    _unproc.__doc__ = docstring + chandoc
    return _unproc
def _applybinop(binop, docstring):
    """Create a process whose output is C{binop(cin1.read(), cin2.read())}.
    """
    chandoc = """
    readset = cin1, cin2
    writeset = cout
    """
    @forever
    def _binproc(cin1, cin2, cout):
        while True:
            # Reads must happen in this order: cin1 before cin2.
            first = cin1.read()
            second = cin2.read()
            cout.write(binop(first, second))
            yield
    _binproc.__doc__ = docstring + chandoc
    return _binproc
# Use some abbreviations to shorten definitions.
unop = _applyunop
binop = _applybinop
op = operator
# Numeric operators
Plus = binop(op.add, "Emits the sum of two input events.")
Sub = binop(op.sub, "Emits the difference of two input events.")
Mul = binop(op.mul, "Emits the product of two input events.")
Div = binop(op.truediv, "Emits the division of two input events.")
FloorDiv = binop(op.floordiv, "Emits the floor div of two input events.")
Mod = binop(op.mod, "Emits the modulo of two input events.")
Pow = binop(op.pow, "Emits the power of two input events.")
Neg = unop(op.neg, "Emits the negation of input events.")
Sin = unop(math.sin, "Emit the sine of input events.")
Cos = unop(math.cos, "Emit the cosine of input events.")
Pairs = Plus
Multiply = Mul
# Bitwise operators
Not = unop(op.invert, "Emits the inverse of input events.")
And = binop(op.and_, "Emits the bitwise and of two input events.")
Or = binop(op.or_, "Emits the bitwise or of two input events.")
Nand = binop(lambda x, y: ~(x & y),
             "Emits the bitwise nand of two input events.")
Nor = binop(lambda x, y: ~(x | y), "Emits the bitwise nor of two input events.")
Xor = binop(op.xor, "Emits the bitwise xor of two input events.")
LShift = binop(op.lshift, "Emits the left shift of two input events.")
RShift = binop(op.rshift, "Emits the right shift of two input events.")
# Logical operators
Land = binop(lambda x, y: x and y, "Emits the logical and of two input events.")
Lor = binop(lambda x, y: x or y, "Emits the logical or of two input events.")
Lnot = unop(op.not_, "Emits the logical not of input events.")
Lnand = binop(lambda x, y: not (x and y),
              "Emits the logical nand of two input events.")
Lnor = binop(lambda x, y: not (x or y),
             "Emits the logical nor of two input events.")
# BUG FIX: the original lambda was (x or y) and (not x and y), which
# evaluates to False for (True, False) -- not an exclusive or.  The
# correct exclusive-or is "either but not both":
Lxor = binop(lambda x, y: (x or y) and not (x and y),
             "Emits the logical xor of two input events.")
# Comparison operators
Eq = binop(op.eq, "Emits True if two input events are equal (==).")
Ne = binop(op.ne, "Emits True if two input events are not equal (not ==).")
Geq = binop(op.ge, "Emits True if first input event is >= second.")
Leq = binop(op.le, "Emits True if first input event is <= second.")
Gt = binop(op.gt, "Emits True if first input event is > second.")
Lt = binop(op.lt, "Emits True if first input event is < second.")
Is = binop(op.is_, "Emits True if two input events are identical.")
Is_Not = binop(op.is_not, "Emits True if two input events are not identical.")
del unop, binop, op
| Python |
#!/usr/bin/env python
"""CSP Commstime benchmark.
See F.R.M. Barnes (2006) Compiling CSP. In Proceedings of
Communicating Process Architectures 2006.
Code adapted from PyCSP by John Markus Bjorndalen, available:
http://www.cs.uit.no/~johnm/code/PyCSP/
PyCSP - Communicating Sequential Processes for Python. John Markus
Bjorndalen, Brian Vinter, Otto Anshus. CPA 2007, Surrey, UK, July
8-11, 2007. IOS Press 2007, ISBN 978-1-58603-767-3, Concurrent
Systems Engineering Series (ISSN 1383-7575).
"""
from csp.csp import *
from csp.builtins import Prefix, Delta2, Succ
import os
import time
@process
def Consumer(cin):
    """Commstime consumer process.

    Reads one warm-up value, then times N further reads and reports
    the mean time per channel operation (every value consumed drives
    four channel communications round the ring).
    readset = cin
    writeset =
    """
    N = 5000
    ts = time.time
    # Warm up the ring before starting the clock.  (The original also
    # took a timestamp here, but it was dead: immediately overwritten.)
    cin.read()
    t1 = ts()
    for _ in range(N):
        cin.read()
    t2 = ts()
    dt = t2 - t1
    # Each consumed value corresponds to 4 channel events in the ring.
    tchan = dt / (4 * N)
    print("DT = {0}.\nTime per ch : {1}/(4*{2}) = {3} s = {4} us".format(dt, dt, N, tchan, tchan * 1000000))
    # BUG FIX: message typo "posioning" -> "poisoning".
    print("consumer done, poisoning channel")
    cin.poison()
def CommsTimeBM():
    """Build the four-process commstime ring and run it once."""
    print('Creating channels now...')
    chan_a = Channel()
    chan_b = Channel()
    chan_c = Channel()
    chan_d = Channel()
    print("Running commstime test")
    network = Par(Prefix(chan_c, chan_a, prefix_item=0),  # Initiator
                  Delta2(chan_a, chan_b, chan_d),         # Forwarding to two
                  Succ(chan_b, chan_c),                   # Feeding back to prefix
                  Consumer(chan_d))                       # Timing process
    network.start()
    print('Finished run...')
if __name__ == '__main__':
    # Repeat the whole benchmark several times so run-to-run variance
    # is visible in the output.
    N_BM = 10
    for i in range(N_BM):
        print("----------- run {0}/{1} -------------".format(i+1, N_BM))
        CommsTimeBM()
    print("------- Commstime finished ---------")
| Python |
#!/usr/bin/env python
"""
Benchmark based on variable sized ring buffer.
See also PyCSP papers in CPA2009 proceedings.
Usage: tokenring-processes.py [options]
Options:
-h, --help show this help message and exit
-t TOKENS, --tokens=TOKENS
Number of tokens in token ring
-n NODES, --nodes=NODES
Number of nodes in token ring
-x, --experiment Experimental mode. Run 10 token rings with nodes 2^1
to 2^10 and print results
Copyright (C) Sarah Mount, 2009.
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
"""
__author__ = 'Sarah Mount <s.mount@wlv.ac.uk>'
__date__ = 'November 2009'
from csp.csp import *
from csp.patterns import TokenRing
TRIALS = 10000
@process
def ringproc(index=0, numnodes=64, tokens=1, inchan=None, outchan=None):
    """One node of the token-ring benchmark: reads a token from
    L{inchan}, increments it and forwards it on L{outchan}, TRIALS
    times over.

    readset = inchan
    writeset = outchan
    """
    # Node 0 injects the circulating token.
    # NOTE(review): only the tokens == 1 case is injected here;
    # presumably multi-token setup happens in TokenRing -- confirm.
    if tokens == 1 and index == 0:
        token = 1
        outchan.write(token)
    for i in range(TRIALS):
        token = inchan.read()
        token += 1
        outchan.write(token)
    # Avoid deadlock: after the final trial one write is still pending
    # in the ring, so one designated node (index 1) drains it.
    if index == 1:
        inchan.read()
if __name__ == '__main__':
    # BUG FIX: `import time` used to live inside the non-experimental
    # branch, so experimental mode raised NameError on time.time().
    import time
    from optparse import OptionParser
    parser = OptionParser()
    parser.add_option('-t', '--tokens', dest='tokens',
                      action='store', type="int",
                      default=1,
                      help='Number of tokens in token ring')
    parser.add_option('-n', '--nodes', dest='nodes',
                      action='store', type="int",
                      default=64,
                      help='Number of nodes in token ring')
    parser.add_option('-x', '--experiment', dest='exp',
                      action='store_true', default=False,
                      help=('Experimental mode. Run 10 token rings with nodes '
                            + '2^1 to 2^10 and print results'))
    (options, args) = parser.parse_args()
    if options.exp:
        print('All times measured in microseconds.')
        for size in range(2, 10):
            try:
                # BUG FIX: the ring has 2 ** size nodes, not `size`.
                print('Token ring with {0} nodes.'.format(2 ** size))
                starttime = time.time()
                TokenRing(ringproc, 2 ** size, numtoks=options.tokens).start()
                elapsed = time.time() - starttime
                # Mean time per communication, in microseconds.
                mu = elapsed * 1000000 / float(TRIALS * (2 ** size))
                # BUG FIX: label said 'ms' although the value is in us.
                print('{0}us'.format(mu))
            except Exception:
                # A ring too large for this platform should not abort
                # the remaining experiments.
                continue
    else:
        # BUG FIX: these two prints were Python 2 print statements in an
        # otherwise print()-function script.
        print('Token ring with {0} nodes and {1} token(s).'.format(options.nodes, options.tokens))
        starttime = time.time()
        TokenRing(ringproc, options.nodes, numtoks=options.tokens).start()
        elapsed = time.time() - starttime
        # BUG FIX: divide by the actual node count (options.nodes), not
        # 2 ** options.nodes, which matched nothing in this branch.
        mu = elapsed * 1000000 / float(TRIALS * options.nodes)
        print('{0}us'.format(mu))
| Python |
#!/usr/bin/env python
"""
Plotting results of variable ring buffer experiment.
Copyright (C) Sarah Mount, 2009.
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
"""
from scipy import *
from pylab import *
__author__ = 'Sarah Mount <s.mount@wlv.ac.uk>'
__date__ = 'November 2009'
FILENAME = 'token_ring.png'
subplots_adjust(hspace=0.4, wspace=0.6)
# Num of nodes in token ring
t = array([2, 4, 8, 16, 32, 64, 128, 256, 512, 1024])
# Mean time per communication for each platform; None marks ring sizes
# that were not measured on that platform.
# BUG FIX: 'procs' held 11 entries against the 10 x-values in t, which
# makes plot(t, ...) raise; one trailing None has been dropped.
# NOTE(review): this assumes 350.057995 really is the 2-node
# measurement -- confirm against the original experiment logs.
yvals = {1:{'procs':array([350.057995,
                           314.430736, 157.215372, 78.607687, 39.303844,
                           19.651922, 9.825962, 4.912981, 2.456491, None]),
            'threads':array([314.430851, 157.215429, 78.607716, 39.303859,
                             19.651930, 9.825965, 4.912983, 2.456492, None, None]),
            'jython':array([314.431448, 157.215729, 78.607867, 39.303935,
                            19.651969, 9.825985, 4.912993, 2.456502, 1.228249, 0.614125])}
        }
subplot(111)
title('Variable sized ring buffer \nwith one token')
plot(t, yvals[1]['procs'], 'g^-')
plot(t, yvals[1]['threads'], 'k*--')
plot(t, yvals[1]['jython'], 'rx-.')
legend(['Processes reified as OS processes',
        'Processes reified as OS threads',
        'Processes reified as Java threads'],
       loc='upper left')
xlabel('Number of nodes in token ring')
ylabel(r'Time $(\mu{}s)$')
###
#subplot(222)
#title('16 node ring buffer \nwith three tokens')
#plot(t, yvals[1]['procs'], 'g^-')
#plot(t, yvals[1]['threads'], 'k*--')
#plot(t, yvals[1]['jython'], 'rx-.')
#legend(['Processes reified as OS processes',
#        'Processes reified as OS threads',
#        'Processes reified as Java threads'],
#       loc='upper left')
#xlabel('Number of nodes in token ring')
#ylabel(r'Time $(\mu{}s)$')
###
#subplot(223)
#title('32 node ring buffer \nwith three tokens')
#plot(t, yvals[1]['procs'], 'g^-')
#plot(t, yvals[1]['threads'], 'k*--')
#plot(t, yvals[1]['jython'], 'rx-.')
#legend(['Processes reified as OS processes',
#        'Processes reified as OS threads',
#        'Processes reified as Java threads'],
#       loc='upper left')
#xlabel('Number of nodes in token ring')
#ylabel(r'Time $(\mu{}s)$')
###
#subplot(224)
#title('64 node ring buffer \nwith three tokens')
#plot(t, yvals[1]['procs'], 'g^-')
#plot(t, yvals[1]['threads'], 'k*--')
#plot(t, yvals[1]['jython'], 'rx-.')
#legend(['Processes reified as OS processes',
#        'Processes reified as OS threads',
#        'Processes reified as Java threads'],
#       loc='upper left')
#xlabel('Number of nodes in token ring')
#ylabel(r'Time $(\mu{}s)$')
###
grid(True)
savefig(FILENAME, format='png')
show()
| Python |
#!/usr/bin/python
import os
from urllib import urlopen
from bs4 import BeautifulSoup
# Destination directory for the mirrored help pages.
output_dir = os.path.abspath('../assets/help')
assert os.path.exists(output_dir)
# Image URLs discovered while rewriting wiki pages; filled as a side
# effect of download_html() and fetched afterwards with download_raw().
files_list_images = []
# Wiki page names to mirror locally as static HTML.
files_list_html = [
    'Manual',
    'FAQ',
    'StylusSupport',
    'SupportedDevices',
    'Permissions' ]
def download_raw(url):
    """Fetch `url` and store it in output_dir under its basename."""
    filename = url.split('/')[-1]
    destination = os.path.join(output_dir, filename)
    payload = urlopen(url).read()
    with open(destination, 'wb') as sink:
        sink.write(payload)
def download_html(name):
    """Fetch wiki page `name`, rewrite it for local browsing and save it
    to output_dir as `name`.html.

    Side effect: appends every external image URL found to
    files_list_images so the caller can mirror them afterwards.
    """
    # '?show=content' asks Google Code for just the wiki page body.
    url = 'http://code.google.com/p/android-quill/wiki/'+name+'?show=content'
    data = urlopen(url).read()
    soup = BeautifulSoup(data)
    # Strip all scripts: the mirrored help pages must be static.
    for script in soup.find_all('script'):
        script.decompose()
    wiki_home = '/p/android-quill/wiki/'
    for link in soup.find_all('a'):
        target = link.get('href', None)
        if target is None:
            continue
        # Rewrite absolute wiki links to relative links pointing at the
        # local .html mirror, preserving any #fragment.
        if target.startswith(wiki_home):
            target = target[len(wiki_home):]
            if target.count('#') == 0:
                target += '.html'
            elif target.count('#') == 1:
                target = target.replace('#', '.html#')
            else:
                raise ValueError('More than one pound-sign in link??')
            link['href'] = target
    for img in soup.find_all('img'):
        url = img['src']
        # Only external images need mirroring; relative ones are local.
        if not url.startswith('http://'):
            continue
        files_list_images.append(url)
        filename = url.split('/')[-1]
        img['src'] = filename
    # Debug dump of the rewritten page (Python 2 print statement -- this
    # whole script is Python 2: note `from urllib import urlopen` above).
    print soup.prettify()
    with open(os.path.join(output_dir, name+'.html'), 'wb') as f:
        f.write(str(soup))
if __name__ == '__main__':
    # The HTML pass must run first: download_html() fills
    # files_list_images as a side effect, which the image pass consumes.
    for f in files_list_html:
        download_html(f)
    for f in files_list_images:
        download_raw(f)
| Python |
###
## * << Haru Free PDF Library 2.0.8 >> -- hpdf_consts.h
## *
## * URL http://libharu.org/
## *
## * Copyright (c) 1999-2006 Takeshi Kanno
## *
## * Permission to use, copy, modify, distribute and sell this software
## * and its documentation for any purpose is hereby granted without fee,
## * provided that the above copyright notice appear in all copies and
## * that both that copyright notice and this permission notice appear
## * in supporting documentation.
## * It is provided "as is" without express or implied warranty.
## *
##
## port to python by Li Jun
## http://groups.google.com/group/pythoncia
from hpdf_types import *
#----------------------------------------------------------------------------
HPDF_TRUE =1
HPDF_FALSE =0
HPDF_OK =0
HPDF_NOERROR =0
#----- default values -------------------------------------------------------
# buffer size which is required when we convert to character string.
HPDF_TMP_BUF_SIZ =512
HPDF_SHORT_BUF_SIZ =32
HPDF_REAL_LEN =11
HPDF_INT_LEN =11
HPDF_TEXT_DEFAULT_LEN =256
HPDF_UNICODE_HEADER_LEN =2
HPDF_DATE_TIME_STR_LEN =23
# length of each item defined in PDF
HPDF_BYTE_OFFSET_LEN =10
HPDF_OBJ_ID_LEN =7
HPDF_GEN_NO_LEN =5
# default value of Graphic State
HPDF_DEF_FONT ="Helvetica"
HPDF_DEF_PAGE_LAYOUT =HPDF_PAGE_LAYOUT_SINGLE
HPDF_DEF_PAGE_MODE =HPDF_PAGE_MODE_USE_NONE
HPDF_DEF_WORDSPACE =0
HPDF_DEF_CHARSPACE =0
HPDF_DEF_FONTSIZE =10
HPDF_DEF_HSCALING =100
HPDF_DEF_LEADING =0
HPDF_DEF_RENDERING_MODE =HPDF_FILL
HPDF_DEF_RISE =0
HPDF_DEF_RAISE =HPDF_DEF_RISE
HPDF_DEF_LINEWIDTH =1
HPDF_DEF_LINECAP =HPDF_BUTT_END
HPDF_DEF_LINEJOIN =HPDF_MITER_JOIN
HPDF_DEF_MITERLIMIT =10
HPDF_DEF_FLATNESS =1
HPDF_DEF_PAGE_NUM =1
HPDF_BS_DEF_WIDTH =1
# default page-size
HPDF_DEF_PAGE_WIDTH =595.276
HPDF_DEF_PAGE_HEIGHT =841.89
HPDF_VERSION_TEXT ="2.0.8"
#---------------------------------------------------------------------------
#----- compression mode ----------------------------------------------------
HPDF_COMP_NONE =0x00
HPDF_COMP_TEXT =0x01
HPDF_COMP_IMAGE =0x02
HPDF_COMP_METADATA =0x04
HPDF_COMP_ALL =0x0F
HPDF_COMP_BEST_COMPRESS =0x10
HPDF_COMP_BEST_SPEED =0x20
HPDF_COMP_MASK =0xFF
#----------------------------------------------------------------------------
#----- permission flags (only Revision 2 is supported)-----------------------
HPDF_ENABLE_READ =0
HPDF_ENABLE_PRINT =4
HPDF_ENABLE_EDIT_ALL =8
HPDF_ENABLE_COPY =16
HPDF_ENABLE_EDIT =32
#----------------------------------------------------------------------------
#------ viewer preferences definitions --------------------------------------
HPDF_HIDE_TOOLBAR =1
HPDF_HIDE_MENUBAR =2
HPDF_HIDE_WINDOW_UI =4
HPDF_FIT_WINDOW =8
HPDF_CENTER_WINDOW =16
#---------------------------------------------------------------------------
#------ limitation of object implementation (PDF1.4) -----------------------
HPDF_LIMIT_MAX_INT =2147483647
HPDF_LIMIT_MIN_INT =-2147483647
HPDF_LIMIT_MAX_REAL =32767
HPDF_LIMIT_MIN_REAL =-32767
HPDF_LIMIT_MAX_STRING_LEN =65535
HPDF_LIMIT_MAX_NAME_LEN =127
HPDF_LIMIT_MAX_ARRAY =8191
HPDF_LIMIT_MAX_DICT_ELEMENT =4095
HPDF_LIMIT_MAX_XREF_ELEMENT =8388607
HPDF_LIMIT_MAX_GSTATE =28
HPDF_LIMIT_MAX_DEVICE_N =8
HPDF_LIMIT_MAX_DEVICE_N_V15 =32
HPDF_LIMIT_MAX_CID =65535
HPDF_MAX_GENERATION_NUM =65535
HPDF_MIN_PAGE_HEIGHT =3
HPDF_MIN_PAGE_WIDTH =3
HPDF_MAX_PAGE_HEIGHT =14400
HPDF_MAX_PAGE_WIDTH =14400
HPDF_MIN_MAGNIFICATION_FACTOR =8
HPDF_MAX_MAGNIFICATION_FACTOR =3200
#---------------------------------------------------------------------------
#------ limitation of various properties -----------------------------------
HPDF_MIN_PAGE_SIZE =3
HPDF_MAX_PAGE_SIZE =14400
HPDF_MIN_HORIZONTALSCALING =10
HPDF_MAX_HORIZONTALSCALING =300
HPDF_MIN_WORDSPACE =-30
HPDF_MAX_WORDSPACE =300
HPDF_MIN_CHARSPACE =-30
HPDF_MAX_CHARSPACE =300
HPDF_MAX_FONTSIZE =300
HPDF_MAX_ZOOMSIZE =10
HPDF_MAX_LEADING =300
HPDF_MAX_LINEWIDTH =100
HPDF_MAX_DASH_PATTERN =100
HPDF_MAX_JWW_NUM =128
#----------------------------------------------------------------------------
#----- country code definition ----------------------------------------------
HPDF_COUNTRY_AF ="AF" # AFGHANISTAN
HPDF_COUNTRY_AL ="AL" # ALBANIA
HPDF_COUNTRY_DZ ="DZ" # ALGERIA
HPDF_COUNTRY_AS ="AS" # AMERICAN SAMOA
HPDF_COUNTRY_AD ="AD" # ANDORRA
HPDF_COUNTRY_AO ="AO" # ANGOLA
HPDF_COUNTRY_AI ="AI" # ANGUILLA
HPDF_COUNTRY_AQ ="AQ" # ANTARCTICA
HPDF_COUNTRY_AG ="AG" # ANTIGUA AND BARBUDA
HPDF_COUNTRY_AR ="AR" # ARGENTINA
HPDF_COUNTRY_AM ="AM" # ARMENIA
HPDF_COUNTRY_AW ="AW" # ARUBA
HPDF_COUNTRY_AU ="AU" # AUSTRALIA
HPDF_COUNTRY_AT ="AT" # AUSTRIA
HPDF_COUNTRY_AZ ="AZ" # AZERBAIJAN
HPDF_COUNTRY_BS ="BS" # BAHAMAS
HPDF_COUNTRY_BH ="BH" # BAHRAIN
HPDF_COUNTRY_BD ="BD" # BANGLADESH
HPDF_COUNTRY_BB ="BB" # BARBADOS
HPDF_COUNTRY_BY ="BY" # BELARUS
HPDF_COUNTRY_BE ="BE" # BELGIUM
HPDF_COUNTRY_BZ ="BZ" # BELIZE
HPDF_COUNTRY_BJ ="BJ" # BENIN
HPDF_COUNTRY_BM ="BM" # BERMUDA
HPDF_COUNTRY_BT ="BT" # BHUTAN
HPDF_COUNTRY_BO ="BO" # BOLIVIA
HPDF_COUNTRY_BA ="BA" # BOSNIA AND HERZEGOWINA
HPDF_COUNTRY_BW ="BW" # BOTSWANA
HPDF_COUNTRY_BV ="BV" # BOUVET ISLAND
HPDF_COUNTRY_BR ="BR" # BRAZIL
HPDF_COUNTRY_IO ="IO" # BRITISH INDIAN OCEAN TERRITORY
HPDF_COUNTRY_BN ="BN" # BRUNEI DARUSSALAM
HPDF_COUNTRY_BG ="BG" # BULGARIA
HPDF_COUNTRY_BF ="BF" # BURKINA FASO
HPDF_COUNTRY_BI ="BI" # BURUNDI
HPDF_COUNTRY_KH ="KH" # CAMBODIA
HPDF_COUNTRY_CM ="CM" # CAMEROON
HPDF_COUNTRY_CA ="CA" # CANADA
HPDF_COUNTRY_CV ="CV" # CAPE VERDE
HPDF_COUNTRY_KY ="KY" # CAYMAN ISLANDS
HPDF_COUNTRY_CF ="CF" # CENTRAL AFRICAN REPUBLIC
HPDF_COUNTRY_TD ="TD" # CHAD
HPDF_COUNTRY_CL ="CL" # CHILE
HPDF_COUNTRY_CN ="CN" # CHINA
HPDF_COUNTRY_CX ="CX" # CHRISTMAS ISLAND
HPDF_COUNTRY_CC ="CC" # COCOS (KEELING) ISLANDS
HPDF_COUNTRY_CO ="CO" # COLOMBIA
HPDF_COUNTRY_KM ="KM" # COMOROS
HPDF_COUNTRY_CG ="CG" # CONGO
HPDF_COUNTRY_CK ="CK" # COOK ISLANDS
HPDF_COUNTRY_CR ="CR" # COSTA RICA
HPDF_COUNTRY_CI ="CI" # COTE D'IVOIRE
HPDF_COUNTRY_HR ="HR" # CROATIA (local name: Hrvatska)
HPDF_COUNTRY_CU ="CU" # CUBA
HPDF_COUNTRY_CY ="CY" # CYPRUS
HPDF_COUNTRY_CZ ="CZ" # CZECH REPUBLIC
HPDF_COUNTRY_DK ="DK" # DENMARK
HPDF_COUNTRY_DJ ="DJ" # DJIBOUTI
HPDF_COUNTRY_DM ="DM" # DOMINICA
HPDF_COUNTRY_DO ="DO" # DOMINICAN REPUBLIC
HPDF_COUNTRY_TP ="TP" # EAST TIMOR
HPDF_COUNTRY_EC ="EC" # ECUADOR
HPDF_COUNTRY_EG ="EG" # EGYPT
HPDF_COUNTRY_SV ="SV" # EL SALVADOR
HPDF_COUNTRY_GQ ="GQ" # EQUATORIAL GUINEA
HPDF_COUNTRY_ER ="ER" # ERITREA
HPDF_COUNTRY_EE ="EE" # ESTONIA
HPDF_COUNTRY_ET ="ET" # ETHIOPIA
HPDF_COUNTRY_FK ="FK" # FALKLAND ISLANDS (MALVINAS)
HPDF_COUNTRY_FO ="FO" # FAROE ISLANDS
HPDF_COUNTRY_FJ ="FJ" # FIJI
HPDF_COUNTRY_FI ="FI" # FINLAND
HPDF_COUNTRY_FR ="FR" # FRANCE
HPDF_COUNTRY_FX ="FX" # FRANCE, METROPOLITAN
HPDF_COUNTRY_GF ="GF" # FRENCH GUIANA
HPDF_COUNTRY_PF ="PF" # FRENCH POLYNESIA
HPDF_COUNTRY_TF ="TF" # FRENCH SOUTHERN TERRITORIES
HPDF_COUNTRY_GA ="GA" # GABON
HPDF_COUNTRY_GM ="GM" # GAMBIA
HPDF_COUNTRY_GE ="GE" # GEORGIA
HPDF_COUNTRY_DE ="DE" # GERMANY
HPDF_COUNTRY_GH ="GH" # GHANA
HPDF_COUNTRY_GI ="GI" # GIBRALTAR
HPDF_COUNTRY_GR ="GR" # GREECE
HPDF_COUNTRY_GL ="GL" # GREENLAND
HPDF_COUNTRY_GD ="GD" # GRENADA
HPDF_COUNTRY_GP ="GP" # GUADELOUPE
HPDF_COUNTRY_GU ="GU" # GUAM
HPDF_COUNTRY_GT ="GT" # GUATEMALA
HPDF_COUNTRY_GN ="GN" # GUINEA
HPDF_COUNTRY_GW ="GW" # GUINEA-BISSAU
HPDF_COUNTRY_GY ="GY" # GUYANA
HPDF_COUNTRY_HT ="HT" # HAITI
HPDF_COUNTRY_HM ="HM" # HEARD AND MC DONALD ISLANDS
HPDF_COUNTRY_HN ="HN" # HONDURAS
HPDF_COUNTRY_HK ="HK" # HONG KONG
HPDF_COUNTRY_HU ="HU" # HUNGARY
HPDF_COUNTRY_IS ="IS" # ICELAND
HPDF_COUNTRY_IN ="IN" # INDIA
HPDF_COUNTRY_ID ="ID" # INDONESIA
HPDF_COUNTRY_IR ="IR" # IRAN (ISLAMIC REPUBLIC OF)
HPDF_COUNTRY_IQ ="IQ" # IRAQ
HPDF_COUNTRY_IE ="IE" # IRELAND
HPDF_COUNTRY_IL ="IL" # ISRAEL
HPDF_COUNTRY_IT ="IT" # ITALY
HPDF_COUNTRY_JM ="JM" # JAMAICA
HPDF_COUNTRY_JP ="JP" # JAPAN
HPDF_COUNTRY_JO ="JO" # JORDAN
HPDF_COUNTRY_KZ ="KZ" # KAZAKHSTAN
HPDF_COUNTRY_KE ="KE" # KENYA
HPDF_COUNTRY_KI ="KI" # KIRIBATI
HPDF_COUNTRY_KP ="KP" # KOREA, DEMOCRATIC PEOPLE'S REPUBLIC OF
HPDF_COUNTRY_KR ="KR" # KOREA, REPUBLIC OF
HPDF_COUNTRY_KW ="KW" # KUWAIT
HPDF_COUNTRY_KG ="KG" # KYRGYZSTAN
HPDF_COUNTRY_LA ="LA" # LAO PEOPLE'S DEMOCRATIC REPUBLIC
HPDF_COUNTRY_LV ="LV" # LATVIA
HPDF_COUNTRY_LB ="LB" # LEBANON
HPDF_COUNTRY_LS ="LS" # LESOTHO
HPDF_COUNTRY_LR ="LR" # LIBERIA
HPDF_COUNTRY_LY ="LY" # LIBYAN ARAB JAMAHIRIYA
HPDF_COUNTRY_LI ="LI" # LIECHTENSTEIN
HPDF_COUNTRY_LT ="LT" # LITHUANIA
HPDF_COUNTRY_LU ="LU" # LUXEMBOURG
HPDF_COUNTRY_MO ="MO" # MACAU
HPDF_COUNTRY_MK ="MK" # MACEDONIA, THE FORMER YUGOSLAV REPUBLIC OF
HPDF_COUNTRY_MG ="MG" # MADAGASCAR
HPDF_COUNTRY_MW ="MW" # MALAWI
HPDF_COUNTRY_MY ="MY" # MALAYSIA
HPDF_COUNTRY_MV ="MV" # MALDIVES
HPDF_COUNTRY_ML ="ML" # MALI
HPDF_COUNTRY_MT ="MT" # MALTA
HPDF_COUNTRY_MH ="MH" # MARSHALL ISLANDS
HPDF_COUNTRY_MQ ="MQ" # MARTINIQUE
HPDF_COUNTRY_MR ="MR" # MAURITANIA
HPDF_COUNTRY_MU ="MU" # MAURITIUS
HPDF_COUNTRY_YT ="YT" # MAYOTTE
HPDF_COUNTRY_MX ="MX" # MEXICO
HPDF_COUNTRY_FM ="FM" # MICRONESIA, FEDERATED STATES OF
HPDF_COUNTRY_MD ="MD" # MOLDOVA, REPUBLIC OF
HPDF_COUNTRY_MC ="MC" # MONACO
HPDF_COUNTRY_MN ="MN" # MONGOLIA
HPDF_COUNTRY_MS ="MS" # MONTSERRAT
HPDF_COUNTRY_MA ="MA" # MOROCCO
HPDF_COUNTRY_MZ ="MZ" # MOZAMBIQUE
HPDF_COUNTRY_MM ="MM" # MYANMAR
HPDF_COUNTRY_NA ="NA" # NAMIBIA
HPDF_COUNTRY_NR ="NR" # NAURU
HPDF_COUNTRY_NP ="NP" # NEPAL
HPDF_COUNTRY_NL ="NL" # NETHERLANDS
HPDF_COUNTRY_AN ="AN" # NETHERLANDS ANTILLES
HPDF_COUNTRY_NC ="NC" # NEW CALEDONIA
HPDF_COUNTRY_NZ ="NZ" # NEW ZEALAND
HPDF_COUNTRY_NI ="NI" # NICARAGUA
HPDF_COUNTRY_NE ="NE" # NIGER
HPDF_COUNTRY_NG ="NG" # NIGERIA
HPDF_COUNTRY_NU ="NU" # NIUE
HPDF_COUNTRY_NF ="NF" # NORFOLK ISLAND
HPDF_COUNTRY_MP ="MP" # NORTHERN MARIANA ISLANDS
HPDF_COUNTRY_NO ="NO" # NORWAY
HPDF_COUNTRY_OM ="OM" # OMAN
HPDF_COUNTRY_PK ="PK" # PAKISTAN
HPDF_COUNTRY_PW ="PW" # PALAU
HPDF_COUNTRY_PA ="PA" # PANAMA
HPDF_COUNTRY_PG ="PG" # PAPUA NEW GUINEA
HPDF_COUNTRY_PY ="PY" # PARAGUAY
HPDF_COUNTRY_PE ="PE" # PERU
HPDF_COUNTRY_PH ="PH" # PHILIPPINES
HPDF_COUNTRY_PN ="PN" # PITCAIRN
HPDF_COUNTRY_PL ="PL" # POLAND
HPDF_COUNTRY_PT ="PT" # PORTUGAL
HPDF_COUNTRY_PR ="PR" # PUERTO RICO
HPDF_COUNTRY_QA ="QA" # QATAR
HPDF_COUNTRY_RE ="RE" # REUNION
HPDF_COUNTRY_RO ="RO" # ROMANIA
HPDF_COUNTRY_RU ="RU" # RUSSIAN FEDERATION
HPDF_COUNTRY_RW ="RW" # RWANDA
HPDF_COUNTRY_KN ="KN" # SAINT KITTS AND NEVIS
HPDF_COUNTRY_LC ="LC" # SAINT LUCIA
HPDF_COUNTRY_VC ="VC" # SAINT VINCENT AND THE GRENADINES
HPDF_COUNTRY_WS ="WS" # SAMOA
HPDF_COUNTRY_SM ="SM" # SAN MARINO
HPDF_COUNTRY_ST ="ST" # SAO TOME AND PRINCIPE
HPDF_COUNTRY_SA ="SA" # SAUDI ARABIA
HPDF_COUNTRY_SN ="SN" # SENEGAL
HPDF_COUNTRY_SC ="SC" # SEYCHELLES
HPDF_COUNTRY_SL ="SL" # SIERRA LEONE
HPDF_COUNTRY_SG ="SG" # SINGAPORE
HPDF_COUNTRY_SK ="SK" # SLOVAKIA (Slovak Republic)
HPDF_COUNTRY_SI ="SI" # SLOVENIA
HPDF_COUNTRY_SB ="SB" # SOLOMON ISLANDS
HPDF_COUNTRY_SO ="SO" # SOMALIA
HPDF_COUNTRY_ZA ="ZA" # SOUTH AFRICA
HPDF_COUNTRY_ES ="ES" # SPAIN
HPDF_COUNTRY_LK ="LK" # SRI LANKA
HPDF_COUNTRY_SH ="SH" # ST. HELENA
HPDF_COUNTRY_PM ="PM" # ST. PIERRE AND MIQUELON
HPDF_COUNTRY_SD ="SD" # SUDAN
HPDF_COUNTRY_SR ="SR" # SURINAME
HPDF_COUNTRY_SJ ="SJ" # SVALBARD AND JAN MAYEN ISLANDS
HPDF_COUNTRY_SZ ="SZ" # SWAZILAND
HPDF_COUNTRY_SE ="SE" # SWEDEN
HPDF_COUNTRY_CH ="CH" # SWITZERLAND
HPDF_COUNTRY_SY ="SY" # SYRIAN ARAB REPUBLIC
HPDF_COUNTRY_TW ="TW" # TAIWAN, PROVINCE OF CHINA
HPDF_COUNTRY_TJ ="TJ" # TAJIKISTAN
HPDF_COUNTRY_TZ ="TZ" # TANZANIA, UNITED REPUBLIC OF
HPDF_COUNTRY_TH ="TH" # THAILAND
HPDF_COUNTRY_TG ="TG" # TOGO
HPDF_COUNTRY_TK ="TK" # TOKELAU
HPDF_COUNTRY_TO ="TO" # TONGA
HPDF_COUNTRY_TT ="TT" # TRINIDAD AND TOBAGO
HPDF_COUNTRY_TN ="TN" # TUNISIA
HPDF_COUNTRY_TR ="TR" # TURKEY
HPDF_COUNTRY_TM ="TM" # TURKMENISTAN
HPDF_COUNTRY_TC ="TC" # TURKS AND CAICOS ISLANDS
HPDF_COUNTRY_TV ="TV" # TUVALU
HPDF_COUNTRY_UG ="UG" # UGANDA
HPDF_COUNTRY_UA ="UA" # UKRAINE
HPDF_COUNTRY_AE ="AE" # UNITED ARAB EMIRATES
HPDF_COUNTRY_GB ="GB" # UNITED KINGDOM
HPDF_COUNTRY_US ="US" # UNITED STATES
HPDF_COUNTRY_UM ="UM" # UNITED STATES MINOR OUTLYING ISLANDS
HPDF_COUNTRY_UY ="UY" # URUGUAY
HPDF_COUNTRY_UZ ="UZ" # UZBEKISTAN
HPDF_COUNTRY_VU ="VU" # VANUATU
HPDF_COUNTRY_VA ="VA" # VATICAN CITY STATE (HOLY SEE)
HPDF_COUNTRY_VE ="VE" # VENEZUELA
HPDF_COUNTRY_VN ="VN" # VIET NAM
HPDF_COUNTRY_VG ="VG" # VIRGIN ISLANDS (BRITISH)
HPDF_COUNTRY_VI ="VI" # VIRGIN ISLANDS (U.S.)
HPDF_COUNTRY_WF ="WF" # WALLIS AND FUTUNA ISLANDS
HPDF_COUNTRY_EH ="EH" # WESTERN SAHARA
HPDF_COUNTRY_YE ="YE" # YEMEN
HPDF_COUNTRY_YU ="YU" # YUGOSLAVIA
HPDF_COUNTRY_ZR ="ZR" # ZAIRE
HPDF_COUNTRY_ZM ="ZM" # ZAMBIA
HPDF_COUNTRY_ZW ="ZW" # ZIMBABWE
#----------------------------------------------------------------------------
#----- lang code definition -------------------------------------------------
HPDF_LANG_AA ="aa" # Afar
HPDF_LANG_AB ="ab" # Abkhazian
HPDF_LANG_AF ="af" # Afrikaans
HPDF_LANG_AM ="am" # Amharic
HPDF_LANG_AR ="ar" # Arabic
HPDF_LANG_AS ="as" # Assamese
HPDF_LANG_AY ="ay" # Aymara
HPDF_LANG_AZ ="az" # Azerbaijani
HPDF_LANG_BA ="ba" # Bashkir
HPDF_LANG_BE ="be" # Byelorussian
HPDF_LANG_BG ="bg" # Bulgarian
HPDF_LANG_BH ="bh" # Bihari
HPDF_LANG_BI ="bi" # Bislama
HPDF_LANG_BN ="bn" # Bengali Bangla
HPDF_LANG_BO ="bo" # Tibetan
HPDF_LANG_BR ="br" # Breton
HPDF_LANG_CA ="ca" # Catalan
HPDF_LANG_CO ="co" # Corsican
HPDF_LANG_CS ="cs" # Czech
HPDF_LANG_CY ="cy" # Welsh
HPDF_LANG_DA ="da" # Danish
HPDF_LANG_DE ="de" # German
HPDF_LANG_DZ ="dz" # Bhutani
HPDF_LANG_EL ="el" # Greek
HPDF_LANG_EN ="en" # English
HPDF_LANG_EO ="eo" # Esperanto
HPDF_LANG_ES ="es" # Spanish
HPDF_LANG_ET ="et" # Estonian
HPDF_LANG_EU ="eu" # Basque
HPDF_LANG_FA ="fa" # Persian
HPDF_LANG_FI ="fi" # Finnish
HPDF_LANG_FJ ="fj" # Fiji
HPDF_LANG_FO ="fo" # Faeroese
HPDF_LANG_FR ="fr" # French
HPDF_LANG_FY ="fy" # Frisian
HPDF_LANG_GA ="ga" # Irish
HPDF_LANG_GD ="gd" # Scots Gaelic
HPDF_LANG_GL ="gl" # Galician
HPDF_LANG_GN ="gn" # Guarani
HPDF_LANG_GU ="gu" # Gujarati
HPDF_LANG_HA ="ha" # Hausa
HPDF_LANG_HI ="hi" # Hindi
HPDF_LANG_HR ="hr" # Croatian
HPDF_LANG_HU ="hu" # Hungarian
HPDF_LANG_HY ="hy" # Armenian
HPDF_LANG_IA ="ia" # Interlingua
HPDF_LANG_IE ="ie" # Interlingue
HPDF_LANG_IK ="ik" # Inupiak
HPDF_LANG_IN ="in" # Indonesian
HPDF_LANG_IS ="is" # Icelandic
HPDF_LANG_IT ="it" # Italian
HPDF_LANG_IW ="iw" # Hebrew
HPDF_LANG_JA ="ja" # Japanese
HPDF_LANG_JI ="ji" # Yiddish
HPDF_LANG_JW ="jw" # Javanese
HPDF_LANG_KA ="ka" # Georgian
HPDF_LANG_KK ="kk" # Kazakh
HPDF_LANG_KL ="kl" # Greenlandic
HPDF_LANG_KM ="km" # Cambodian
HPDF_LANG_KN ="kn" # Kannada
HPDF_LANG_KO ="ko" # Korean
HPDF_LANG_KS ="ks" # Kashmiri
HPDF_LANG_KU ="ku" # Kurdish
HPDF_LANG_KY ="ky" # Kirghiz
HPDF_LANG_LA ="la" # Latin
HPDF_LANG_LN ="ln" # Lingala
HPDF_LANG_LO ="lo" # Laothian
HPDF_LANG_LT ="lt" # Lithuanian
HPDF_LANG_LV ="lv" # Latvian,Lettish
HPDF_LANG_MG ="mg" # Malagasy
HPDF_LANG_MI ="mi" # Maori
HPDF_LANG_MK ="mk" # Macedonian
HPDF_LANG_ML ="ml" # Malayalam
HPDF_LANG_MN ="mn" # Mongolian
HPDF_LANG_MO ="mo" # Moldavian
HPDF_LANG_MR ="mr" # Marathi
HPDF_LANG_MS ="ms" # Malay
HPDF_LANG_MT ="mt" # Maltese
HPDF_LANG_MY ="my" # Burmese
HPDF_LANG_NA ="na" # Nauru
HPDF_LANG_NE ="ne" # Nepali
HPDF_LANG_NL ="nl" # Dutch
HPDF_LANG_NO ="no" # Norwegian
HPDF_LANG_OC ="oc" # Occitan
HPDF_LANG_OM ="om" # (Afan)Oromo
HPDF_LANG_OR ="or" # Oriya
HPDF_LANG_PA ="pa" # Punjabi
HPDF_LANG_PL ="pl" # Polish
HPDF_LANG_PS ="ps" # Pashto,Pushto
HPDF_LANG_PT ="pt" # Portuguese
HPDF_LANG_QU ="qu" # Quechua
HPDF_LANG_RM ="rm" # Rhaeto-Romance
HPDF_LANG_RN ="rn" # Kirundi
HPDF_LANG_RO ="ro" # Romanian
HPDF_LANG_RU ="ru" # Russian
HPDF_LANG_RW ="rw" # Kinyarwanda
HPDF_LANG_SA ="sa" # Sanskrit
HPDF_LANG_SD ="sd" # Sindhi
HPDF_LANG_SG ="sg" # Sangro
HPDF_LANG_SH ="sh" # Serbo-Croatian
HPDF_LANG_SI ="si" # Singhalese
HPDF_LANG_SK ="sk" # Slovak
HPDF_LANG_SL ="sl" # Slovenian
HPDF_LANG_SM ="sm" # Samoan
HPDF_LANG_SN ="sn" # Shona
HPDF_LANG_SO ="so" # Somali
HPDF_LANG_SQ ="sq" # Albanian
HPDF_LANG_SR ="sr" # Serbian
HPDF_LANG_SS ="ss" # Siswati
HPDF_LANG_ST ="st" # Sesotho
HPDF_LANG_SU ="su" # Sundanese
HPDF_LANG_SV ="sv" # Swedish
HPDF_LANG_SW ="sw" # Swahili
HPDF_LANG_TA ="ta" # Tamil
HPDF_LANG_TE ="te" # Tegulu
HPDF_LANG_TG ="tg" # Tajik
HPDF_LANG_TH ="th" # Thai
HPDF_LANG_TI ="ti" # Tigrinya
HPDF_LANG_TK ="tk" # Turkmen
HPDF_LANG_TL ="tl" # Tagalog
HPDF_LANG_TN ="tn" # Setswanato Tonga
HPDF_LANG_TR ="tr" # Turkish
HPDF_LANG_TS ="ts" # Tsonga
HPDF_LANG_TT ="tt" # Tatar
HPDF_LANG_TW ="tw" # Twi
HPDF_LANG_UK ="uk" # Ukrainian
HPDF_LANG_UR ="ur" # Urdu
HPDF_LANG_UZ ="uz" # Uzbek
HPDF_LANG_VI ="vi" # Vietnamese
HPDF_LANG_VO ="vo" # Volapuk
HPDF_LANG_WO ="wo" # Wolof
HPDF_LANG_XH ="xh" # Xhosa
HPDF_LANG_YO ="yo" # Yoruba
HPDF_LANG_ZH ="zh" # Chinese
HPDF_LANG_ZU ="zu" # Zulu
#----------------------------------------------------------------------------
#----- Graphics mode --------------------------------------------------------
HPDF_GMODE_PAGE_DESCRIPTION =0x0001
HPDF_GMODE_PATH_OBJECT =0x0002
HPDF_GMODE_TEXT_OBJECT =0x0004
HPDF_GMODE_CLIPPING_PATH =0x0008
HPDF_GMODE_SHADING =0x0010
HPDF_GMODE_INLINE_IMAGE =0x0020
HPDF_GMODE_EXTERNAL_OBJECT =0x0040
| Python |
###
## * << Haru Free PDF Library 2.0.3 >> -- hpdf_types.h
## *
## * URL http://libharu.org/
## *
## * Copyright (c) 1999-2006 Takeshi Kanno
## *
## * Permission to use, copy, modify, distribute and sell this software
## * and its documentation for any purpose is hereby granted without fee,
## * provided that the above copyright notice appear in all copies and
## * that both that copyright notice and this permission notice appear
## * in supporting documentation.
## * It is provided "as is" without express or implied warranty.
## *
##
## port to python by Li Jun
## http://groups.google.com/group/pythoncia
from ctypes import *
#----------------------------------------------------------------------------
#----- type definition ------------------------------------------------------
# native OS integer types
HPDF_INT=c_int
HPDF_UINT=c_uint
# 32bit integer types
HPDF_INT32=c_int
HPDF_UINT32=c_uint
# 16bit integer types
HPDF_INT16=c_short
HPDF_UINT16=c_ushort
# 8bit integer types
HPDF_INT8=c_char
HPDF_UINT8=c_ubyte
# 8bit binary types
HPDF_BYTE=c_ubyte
# float type (32bit IEEE754)
HPDF_REAL=c_float
# double type (64bit IEEE754)
HPDF_DOUBLE=c_double
# boolean type (0: False, !0: True)
HPDF_BOOL=c_int
# error-no type (32bit unsigned integer)
HPDF_STATUS=c_ulong
# character-code type (16bit)
HPDF_CID=HPDF_UINT16
HPDF_UNICODE=HPDF_UINT16
# HPDF_Point struct
class _HPDF_Point(Structure):
    """ctypes mirror of the C struct HPDF_Point: a 2-D point."""
    _fields_=[
        ('x', HPDF_REAL),
        ('y', HPDF_REAL),
    ]
HPDF_Point = _HPDF_Point
class _HPDF_Rect(Structure):
    """ctypes mirror of the C struct HPDF_Rect: a rectangle given by
    its four edge coordinates."""
    _fields_=[
        ('left', HPDF_REAL),
        ('bottom', HPDF_REAL),
        ('right', HPDF_REAL),
        ('top', HPDF_REAL),
    ]
HPDF_Rect = _HPDF_Rect
# A page box shares the rectangle layout.
HPDF_Box= _HPDF_Rect
# HPDF_Date struct
class _HPDF_Date(Structure):
    """ctypes mirror of the C struct HPDF_Date: a date/time value with
    a timezone offset (off_hour/off_minutes)."""
    _fields_=[
        ('year', HPDF_INT),
        ('month', HPDF_INT),
        ('day', HPDF_INT),
        ('hour', HPDF_INT),
        ('minutes', HPDF_INT),
        ('seconds', HPDF_INT),
        # Offset sign indicator character -- presumably '+', '-' or
        # 'Z'; confirm against the libharu documentation.
        ('ind', c_char),
        ('off_hour', HPDF_INT),
        ('off_minutes', HPDF_INT),
    ]
HPDF_Date = _HPDF_Date
#enum starts
## date-time type parameters
HPDF_INFO_CREATION_DATE =0
HPDF_INFO_MOD_DATE =1
## string type parameters
HPDF_INFO_AUTHOR =2
HPDF_INFO_CREATOR =3
HPDF_INFO_PRODUCER =4
HPDF_INFO_TITLE =5
HPDF_INFO_SUBJECT =6
HPDF_INFO_KEYWORDS =7
HPDF_INFO_EOF =8
HPDF_InfoType =c_int
#enum ends
#enum starts
HPDF_VER_12 =0
HPDF_VER_13 =1
HPDF_VER_14 =2
HPDF_VER_15 =3
HPDF_VER_16 =4
HPDF_VER_EOF =5
HPDF_PDFVer =c_int
#enum ends
#enum starts
HPDF_ENCRYPT_R2 =2
HPDF_ENCRYPT_R3 =3
HPDF_EncryptMode =c_int
#enum ends
##typedef void
##(HPDF_STDCALL *HPDF_Error_Handler) (HPDF_STATUS error_no,
## HPDF_STATUS detail_no,
## void *user_data)
def HPDF_Error_Handler(restype,
                       error_no,
                       detail_no,
                       user_data):
    """Build the ctypes function type for a libharu error handler.

    Mirrors the C typedef:
    void (HPDF_STDCALL *HPDF_Error_Handler)(HPDF_STATUS error_no,
                                            HPDF_STATUS detail_no,
                                            void *user_data)
    """
    return CFUNCTYPE(restype, error_no, detail_no, user_data)
#typedef void*
#(HPDF_STDCALL *HPDF_Alloc_Func) (HPDF_UINT size)
HPDF_Alloc_Func=CFUNCTYPE(c_void_p,
HPDF_UINT, #size
)
#typedef void
#(HPDF_STDCALL *HPDF_Free_Func) (void *aptr)
HPDF_Free_Func=CFUNCTYPE(None,
c_void_p, #aptr
)
#---------------------------------------------------------------------------
#------ text width struct --------------------------------------------------
class _HPDF_TextWidth(Structure):
    """ctypes mirror of the C struct HPDF_TextWidth: result of libharu
    text-measurement calls."""
    _fields_=[
        ('numchars', HPDF_UINT),
        # Do not rely on this field (it may change in the future);
        # use numspace instead.
        ('numwords', HPDF_UINT),
        ('width', HPDF_UINT),
        ('numspace', HPDF_UINT),
    ]
HPDF_TextWidth = _HPDF_TextWidth
#---------------------------------------------------------------------------
#------ dash mode ----------------------------------------------------------
# ctypes mirror of HPDF_DashMode: up to 8 dash-pattern entries plus the
# number of valid entries and the phase offset.
class _HPDF_DashMode(Structure):
    _fields_=[
        ('ptn', HPDF_UINT16*8),
        ('num_ptn', HPDF_UINT),
        ('phase', HPDF_UINT),
        ]
HPDF_DashMode = _HPDF_DashMode
#---------------------------------------------------------------------------
#----- HPDF_TransMatrix struct ---------------------------------------------
# ctypes mirror of HPDF_TransMatrix: the six coefficients (a b c d x y) of a
# PDF transformation matrix.
class _HPDF_TransMatrix(Structure):
    _fields_=[
        ('a', HPDF_REAL),
        ('b', HPDF_REAL),
        ('c', HPDF_REAL),
        ('d', HPDF_REAL),
        ('x', HPDF_REAL),
        ('y', HPDF_REAL),
        ]
HPDF_TransMatrix = _HPDF_TransMatrix
#---------------------------------------------------------------------------
# Color-space selector enum.
#enum starts
HPDF_CS_DEVICE_GRAY =0
HPDF_CS_DEVICE_RGB =1
HPDF_CS_DEVICE_CMYK =2
HPDF_CS_CAL_GRAY =3
HPDF_CS_CAL_RGB =4
HPDF_CS_LAB =5
HPDF_CS_ICC_BASED =6
HPDF_CS_SEPARATION =7
HPDF_CS_DEVICE_N =8
HPDF_CS_INDEXED =9
HPDF_CS_PATTERN =10
HPDF_CS_EOF =11
HPDF_ColorSpace =c_int
#enum ends
#---------------------------------------------------------------------------
#----- HPDF_RGBColor struct ------------------------------------------------
# ctypes mirror of HPDF_RGBColor (three HPDF_REAL components).
class _HPDF_RGBColor(Structure):
    _fields_=[
        ('r', HPDF_REAL),
        ('g', HPDF_REAL),
        ('b', HPDF_REAL),
        ]
HPDF_RGBColor = _HPDF_RGBColor
#---------------------------------------------------------------------------
#----- HPDF_CMYKColor struct -----------------------------------------------
# ctypes mirror of HPDF_CMYKColor (four HPDF_REAL components).
class _HPDF_CMYKColor(Structure):
    _fields_=[
        ('c', HPDF_REAL),
        ('m', HPDF_REAL),
        ('y', HPDF_REAL),
        ('k', HPDF_REAL),
        ]
HPDF_CMYKColor=_HPDF_CMYKColor
#---------------------------------------------------------------------------
#------ The line cap style -------------------------------------------------
#enum starts
HPDF_BUTT_END =0
HPDF_ROUND_END =1
HPDF_PROJECTING_SCUARE_END =2
HPDF_LINECAP_EOF =3
HPDF_LineCap =c_int
#enum ends
#----------------------------------------------------------------------------
#------ The line join style -------------------------------------------------
#enum starts
HPDF_MITER_JOIN =0
HPDF_ROUND_JOIN =1
HPDF_BEVEL_JOIN =2
HPDF_LINEJOIN_EOF =3
HPDF_LineJoin =c_int
#enum ends
#----------------------------------------------------------------------------
#------ The text rendering mode ---------------------------------------------
#enum starts
HPDF_FILL =0
HPDF_STROKE =1
HPDF_FILL_THEN_STROKE =2
HPDF_INVISIBLE =3
HPDF_FILL_CLIPPING =4
HPDF_STROKE_CLIPPING =5
HPDF_FILL_STROKE_CLIPPING =6
HPDF_CLIPPING =7
HPDF_RENDERING_MODE_EOF =8
HPDF_TextRenderingMode =c_int
#enum ends
# Writing direction (horizontal/vertical).
#enum starts
HPDF_WMODE_HORIZONTAL =0
HPDF_WMODE_VERTICAL =1
HPDF_WMODE_EOF =2
HPDF_WritingMode =c_int
#enum ends
# Viewer page-layout preference.
#enum starts
HPDF_PAGE_LAYOUT_SINGLE =0
HPDF_PAGE_LAYOUT_ONE_COLUMN =1
HPDF_PAGE_LAYOUT_TWO_COLUMN_LEFT =2
HPDF_PAGE_LAYOUT_TWO_COLUMN_RIGHT =3
HPDF_PAGE_LAYOUT_EOF =4
HPDF_PageLayout =c_int
#enum ends
# Viewer page-mode preference.
#enum starts
HPDF_PAGE_MODE_USE_NONE =0
HPDF_PAGE_MODE_USE_OUTLINE =1
HPDF_PAGE_MODE_USE_THUMBS =2
HPDF_PAGE_MODE_FULL_SCREEN =3
HPDF_PAGE_MODE_USE_OC =4 #???
HPDF_PAGE_MODE_USE_ATTACHMENTS =5 #???
HPDF_PAGE_MODE_EOF =6
HPDF_PageMode =c_int
#enum ends
# Page-number label style (see HPDF_AddPageLabel).
#enum starts
HPDF_PAGE_NUM_STYLE_DECIMAL =0
HPDF_PAGE_NUM_STYLE_UPPER_ROMAN =1
HPDF_PAGE_NUM_STYLE_LOWER_ROMAN =2
HPDF_PAGE_NUM_STYLE_UPPER_LETTERS =3
HPDF_PAGE_NUM_STYLE_LOWER_LETTERS =4
HPDF_PAGE_NUM_STYLE_EOF =5
HPDF_PageNumStyle =c_int
#enum ends
# Destination fit type (see the HPDF_Destination_Set* wrappers below).
#enum starts
HPDF_XYZ =0
HPDF_FIT =1
HPDF_FIT_H =2
HPDF_FIT_V =3
HPDF_FIT_R =4
HPDF_FIT_B =5
HPDF_FIT_BH =6
HPDF_FIT_BV =7
HPDF_DST_EOF =8
HPDF_DestinationType =c_int
#enum ends
# Annotation kind.
#enum starts
HPDF_ANNOT_TEXT_NOTES =0
HPDF_ANNOT_LINK =1
HPDF_ANNOT_SOUND =2
HPDF_ANNOT_FREE_TEXT =3
HPDF_ANNOT_STAMP =4
HPDF_ANNOT_SQUARE =5
HPDF_ANNOT_CIRCLE =6
HPDF_ANNOT_STRIKE_OUT =7
HPDF_ANNOT_HIGHTLIGHT =8
HPDF_ANNOT_UNDERLINE =9
HPDF_ANNOT_INK =10
HPDF_ANNOT_FILE_ATTACHMENT =11
HPDF_ANNOT_POPUP =12
HPDF_AnnotType =c_int
#enum ends
# Annotation flag bits.
#enum starts
HPDF_ANNOT_INVISIBLE =0
HPDF_ANNOT_HIDDEN =1
HPDF_ANNOT_PRINT =2
HPDF_ANNOT_NOZOOM =3
HPDF_ANNOT_NOROTATE =4
HPDF_ANNOT_NOVIEW =5
HPDF_ANNOT_READONLY =6
HPDF_AnnotFlgs =c_int
#enum ends
# Highlight behaviour of a link annotation when clicked.
#enum starts
HPDF_ANNOT_NO_HIGHTLIGHT =0
HPDF_ANNOT_INVERT_BOX =1
HPDF_ANNOT_INVERT_BORDER =2
HPDF_ANNOT_DOWN_APPEARANCE =3
HPDF_ANNOT_HIGHTLIGHT_MODE_EOF =4
HPDF_AnnotHighlightMode =c_int
#enum ends
# Icon shown for a text annotation.
#enum starts
HPDF_ANNOT_ICON_COMMENT =0
HPDF_ANNOT_ICON_KEY =1
HPDF_ANNOT_ICON_NOTE =2
HPDF_ANNOT_ICON_HELP =3
HPDF_ANNOT_ICON_NEW_PARAGRAPH =4
HPDF_ANNOT_ICON_PARAGRAPH =5
HPDF_ANNOT_ICON_INSERT =6
HPDF_ANNOT_ICON_EOF =7
HPDF_AnnotIcon =c_int
#enum ends
#----------------------------------------------------------------------------
#------ border stype --------------------------------------------------------
#enum starts
HPDF_BS_SOLID =0
HPDF_BS_DASHED =1
HPDF_BS_BEVELED =2
HPDF_BS_INSET =3
HPDF_BS_UNDERLINED =4
HPDF_BSSubtype =c_int
#enum ends
#----- blend modes ----------------------------------------------------------
#enum starts
HPDF_BM_NORMAL =0
HPDF_BM_MULTIPLY =1
HPDF_BM_SCREEN =2
HPDF_BM_OVERLAY =3
HPDF_BM_DARKEN =4
HPDF_BM_LIGHTEN =5
HPDF_BM_COLOR_DODGE =6
HPDF_BM_COLOR_BUM =7
HPDF_BM_HARD_LIGHT =8
HPDF_BM_SOFT_LIGHT =9
HPDF_BM_DIFFERENCE =10
HPDF_BM_EXCLUSHON =11
HPDF_BM_EOF =12
HPDF_BlendMode =c_int
#enum ends
#----- slide show -----------------------------------------------------------
# Page-transition styles for presentation mode.
#enum starts
HPDF_TS_WIPE_RIGHT =0
HPDF_TS_WIPE_UP =1
HPDF_TS_WIPE_LEFT =2
HPDF_TS_WIPE_DOWN =3
HPDF_TS_BARN_DOORS_HORIZONTAL_OUT =4
HPDF_TS_BARN_DOORS_HORIZONTAL_IN =5
HPDF_TS_BARN_DOORS_VERTICAL_OUT =6
HPDF_TS_BARN_DOORS_VERTICAL_IN =7
HPDF_TS_BOX_OUT =8
HPDF_TS_BOX_IN =9
HPDF_TS_BLINDS_HORIZONTAL =10
HPDF_TS_BLINDS_VERTICAL =11
HPDF_TS_DISSOLVE =12
HPDF_TS_GLITTER_RIGHT =13
HPDF_TS_GLITTER_DOWN =14
HPDF_TS_GLITTER_TOP_LEFT_TO_BOTTOM_RIGHT =15
HPDF_TS_REPLACE =16
HPDF_TS_EOF =17
HPDF_TransitionStyle =c_int
#enum ends
#----------------------------------------------------------------------------
# Predefined page sizes (see HPDF_Page_SetSize).
#enum starts
HPDF_PAGE_SIZE_LETTER =0
HPDF_PAGE_SIZE_LEGAL =1
HPDF_PAGE_SIZE_A3 =2
HPDF_PAGE_SIZE_A4 =3
HPDF_PAGE_SIZE_A5 =4
HPDF_PAGE_SIZE_B4 =5
HPDF_PAGE_SIZE_B5 =6
HPDF_PAGE_SIZE_EXECUTIVE =7
HPDF_PAGE_SIZE_US4x6 =8
HPDF_PAGE_SIZE_US4x8 =9
HPDF_PAGE_SIZE_US5x7 =10
HPDF_PAGE_SIZE_COMM10 =11
HPDF_PAGE_SIZE_EOF =12
HPDF_PageSizes =c_int
#enum ends
# Page orientation.
#enum starts
HPDF_PAGE_PORTRAIT =0
HPDF_PAGE_LANDSCAPE =1
HPDF_PageDirection =c_int
#enum ends
# Encoder classification.
#enum starts
HPDF_ENCODER_TYPE_SINGLE_BYTE =0
HPDF_ENCODER_TYPE_DOUBLE_BYTE =1
HPDF_ENCODER_TYPE_UNINITIALIZED =2
HPDF_ENCODER_UNKNOWN =3
HPDF_EncoderType =c_int
#enum ends
# Per-byte classification in multi-byte encodings.
#enum starts
HPDF_BYTE_TYPE_SINGLE =0
HPDF_BYTE_TYPE_LEAD =1
HPDF_BYTE_TYPE_TRIAL =2
HPDF_BYTE_TYPE_UNKNOWN =3
HPDF_ByteType =c_int
#enum ends
# Text alignment for HPDF_Page_TextRect-style calls.
#enum starts
HPDF_TALIGN_LEFT =0
HPDF_TALIGN_RIGHT =1
HPDF_TALIGN_CENTER =2
HPDF_TALIGN_JUSTIFY =3
HPDF_TextAlignment =c_int
#enum ends
# (removed stray dataset/table residue that was not valid Python)
# Map from libharu HPDF_STATUS error codes to human-readable descriptions.
# Fixed typos in several messages ("lengh" -> "length", "spacified" ->
# "specified", "internel" -> "internal", "SetCommpressionMode" ->
# "SetCompressionMode"); keys and all other text are unchanged.
error_detail={
    0x1001: 'Internal error. The consistency of the data was lost.',
    0x1002: 'Internal error. The consistency of the data was lost.',
    0x1003: 'Internal error. The consistency of the data was lost.',
    0x1004: 'The length of the data exceeds HPDF_LIMIT_MAX_STRING_LEN.',
    0x1005: 'Cannot get a pallet data from PNG image.',
    0x1007: 'The count of elements of a dictionary exceeds HPDF_LIMIT_MAX_DICT_ELEMENT',
    0x1008: 'Internal error. The consistency of the data was lost.',
    0x1009: 'Internal error. The consistency of the data was lost.',
    0x100A: 'Internal error. The consistency of the data was lost.',
    0x100B: 'HPDF_SetPermission() OR HPDF_SetEncryptMode() was called before a password is set.',
    0x100C: 'Internal error. The consistency of the data was lost.',
    0x100E: 'Tried to register a font that has been registered.',
    0x100F: 'Cannot register a character to the japanese word wrap characters list.',
    0x1011: 'Tried to set the owner password to NULL.\nThe owner password and user password is the same.',
    0x1013: 'Internal error. The consistency of the data was lost.',
    0x1014: 'The depth of the stack exceeded HPDF_LIMIT_MAX_GSTATE.',
    0x1015: 'Memory allocation failed.',
    0x1016: 'File processing failed. (A detailed code is set.)',
    0x1017: 'Cannot open a file. (A detailed code is set.)',
    0x1019: 'Tried to load a font that has been registered.',
    0x101A: 'The format of a font-file is invalid.\nInternal error. The consistency of the data was lost.',
    0x101B: 'Cannot recognize a header of an afm file.',
    0x101C: 'The specified annotation handle is invalid.',
    0x101E: 'Bit-per-component of a image which was set as mask-image is invalid.',
    0x101F: 'Cannot recognize char-matrics-data of an afm file.',
    0x1020: '1. The color_space parameter of HPDF_LoadRawImage is invalid.\n2. Color-space of a image which was set as mask-image is invalid.\n3. The function which is invalid in the present color-space was invoked.',
    0x1021: 'Invalid value was set when invoking HPDF_SetCompressionMode().',
    0x1022: 'An invalid date-time value was set.',
    0x1023: 'An invalid destination handle was set.',
    0x1025: 'An invalid document handle is set.',
    0x1026: 'The function which is invalid in the present state was invoked.',
    0x1027: 'An invalid encoder handle is set.',
    0x1028: 'A combination between font and encoder is wrong.',
    0x102B: 'An Invalid encoding name is specified.',
    0x102C: 'The length of the key of encryption is invalid.',
    0x102D: '1. An invalid font handle was set.\n2. Unsupported font format.',
    0x102E: 'Internal error. The consistency of the data was lost.',
    0x102F: 'A font which has the specified name is not found.',
    0x1030: 'Unsupported image format.',
    0x1031: 'Unsupported image format.',
    0x1032: 'Cannot read a postscript-name from an afm file.',
    0x1033: '1. An invalid object is set.\n2. Internal error. The consistency of the data was lost.',
    0x1034: 'Internal error. The consistency of the data was lost.',
    0x1035: '1. Invoked HPDF_Image_SetColorMask() against the image-object which was set a mask-image.',
    0x1036: 'An invalid outline-handle was specified.',
    0x1037: 'An invalid page-handle was specified.',
    0x1038: 'An invalid pages-handle was specified. (internal error)',
    0x1039: 'An invalid value is set.',
    0x103B: 'Invalid PNG image format.',
    0x103C: 'Internal error. The consistency of the data was lost.',
    0x103D: 'Internal error. The "_FILE_NAME" entry for delayed loading is missing.',
    0x103F: 'Invalid .TTC file format.',
    0x1040: 'The index parameter was exceed the number of included fonts',
    0x1041: 'Cannot read a width-data from an afm file.',
    0x1042: 'Internal error. The consistency of the data was lost.',
    0x1043: 'An error has returned from PNGLIB while loading an image.',
    0x1044: 'Internal error. The consistency of the data was lost.',
    0x1045: 'Internal error. The consistency of the data was lost.',
    0x1049: 'Internal error. The consistency of the data was lost.',
    0x104A: 'Internal error. The consistency of the data was lost.',
    0x104B: 'Internal error. The consistency of the data was lost.',
    0x104C: 'There are no graphics-states to be restored.',
    0x104D: 'Internal error. The consistency of the data was lost.',
    0x104E: 'The current font is not set.',
    0x104F: 'An invalid font-handle was specified.',
    0x1050: 'An invalid font-size was set.',
    0x1051: 'See Graphics mode.',
    0x1052: 'Internal error. The consistency of the data was lost.',
    0x1053: 'The specified value is not a multiple of 90.',
    0x1054: 'An invalid page-size was set.',
    0x1055: 'An invalid image-handle was set.',
    0x1056: 'The specified value is out of range.',
    0x1057: 'The specified value is out of range.',
    0x1058: 'Unexpected EOF marker was detected.',
    0x1059: 'Internal error. The consistency of the data was lost.',
    0x105B: 'The length of the specified text is too long.',
    0x105C: 'The execution of a function was skipped because of other errors.',
    0x105D: 'This font cannot be embedded. (restricted by license)',
    0x105E: 'Unsupported ttf format. (cannot find unicode cmap.)',
    0x105F: 'Unsupported ttf format.',
    0x1060: 'Unsupported ttf format. (cannot find a necessary table)',
    0x1061: 'Internal error. The consistency of the data was lost.',
    0x1062: '1. The library is not configured to use PNGLIB.\n2. Internal error. The consistency of the data was lost.',
    0x1063: 'Unsupported Jpeg format.',
    0x1064: 'Failed to parse .PFB file.',
    0x1065: 'Internal error. The consistency of the data was lost.',
    0x1066: 'An error has occurred while executing a function of Zlib.',
    0x1067: 'An error returned from Zlib.',
    0x1068: 'An invalid URI was set.',
    0x1069: 'An invalid page-layout was set.',
    0x1070: 'An invalid page-mode was set.',
    0x1071: 'An invalid page-num-style was set.',
    0x1072: 'An invalid icon was set.',
    0x1073: 'An invalid border-style was set.',
    0x1074: 'An invalid page-direction was set.',
    0x1075: 'An invalid font-handle was specified.',
}
def printf(format, *optional):
    """C-style printf: write ``format % optional`` plus a trailing newline.

    A single extra argument is unpacked so ``printf('%d\\n', 3)`` works; a
    trailing newline in *format* is stripped first so exactly one newline is
    emitted.  Rewritten to use the parenthesised single-argument print form,
    which behaves identically as a Py2 statement and a Py3 function call
    (the original bare ``print`` statement is a SyntaxError on Python 3).
    """
    if len(optional) == 1:
        optional = optional[0]
    if format.endswith('\n'):
        format = format[:-1]
    print(format % optional)
# (removed stray dataset/table residue that was not valid Python)
##
## * << Haru Free PDF Library 2.0.8 >> -- hpdf.h
## *
## * URL http://libharu.org/
## *
## * Copyright (c) 1999-2006 Takeshi Kanno
## *
## * Permission to use, copy, modify, distribute and sell this software
## * and its documentation for any purpose is hereby granted without fee,
## * provided that the above copyright notice appear in all copies and
## * that both that copyright notice and this permission notice appear
## * in supporting documentation.
## * It is provided "as is" without express or implied warranty.
## *
##
## port to python by Li Jun
## http://groups.google.com/group/pythoncia
import os
import sys
import types
def setpath():
    """Make the bundled ``dll`` directory visible to the dynamic loader.

    The directory ``<this file's dir>/dll`` is prepended to ``PATH``
    (semicolon-separated, as on Windows) unless it is already present.
    """
    here = os.path.dirname(os.path.realpath(__file__))
    dllpath = '%s/dll' % here
    current = os.environ.get('PATH')
    if current is None:
        os.environ['PATH'] = dllpath
    elif dllpath not in current:
        os.environ['PATH'] = '%s;%s' % (dllpath, current)
# Put the bundled dll directory on PATH before the shared library is loaded.
setpath()
from hpdf_consts import *
from hpdf_types import *
# Pick the platform-specific libharu binary; both variants are loaded via
# CDLL (cdecl calling convention).
if os.sys.platform=='win32':
    harudll='libhpdf.dll'
    #haru=WinDLL(harudll)
    haru=CDLL(harudll)
else:
    harudll='libhpdf.so'
    haru=CDLL(harudll)
# All libharu object handles are opaque pointers on the Python side; the
# distinct names exist only for documentation purposes.
HPDF_HANDLE=c_void_p
HPDF_Doc=HPDF_HANDLE
HPDF_Page=HPDF_HANDLE
HPDF_Pages=HPDF_HANDLE
HPDF_Stream=HPDF_HANDLE
HPDF_Image=HPDF_HANDLE
HPDF_Font=HPDF_HANDLE
HPDF_Outline=HPDF_HANDLE
HPDF_Encoder=HPDF_HANDLE
HPDF_Destination=HPDF_HANDLE
HPDF_XObject=HPDF_HANDLE
HPDF_Annotation=HPDF_HANDLE
HPDF_ExtGState=HPDF_HANDLE
# Direct bindings: each pair binds the C symbol and declares its return type.
# Argument types are left unspecified (ctypes default conversion applies).
#const char * HPDF_GetVersion (void)
HPDF_GetVersion=haru.HPDF_GetVersion
HPDF_GetVersion.restype=c_char_p
#HPDF_Doc HPDF_NewEx (HPDF_Error_Handler user_error_fn, HPDF_Alloc_Func user_alloc_fn, HPDF_Free_Func user_free_fn, HPDF_UINT mem_pool_buf_size, void *user_data)
HPDF_NewEx=haru.HPDF_NewEx
HPDF_NewEx.restype=HPDF_Doc
#HPDF_Doc HPDF_New (HPDF_Error_Handler user_error_fn, void *user_data)
HPDF_New=haru.HPDF_New
HPDF_New.restype=HPDF_Doc
#HPDF_STATUS HPDF_SetErrorHandler (HPDF_Doc pdf, HPDF_Error_Handler user_error_fn)
HPDF_SetErrorHandler=haru.HPDF_SetErrorHandler
HPDF_SetErrorHandler.restype=HPDF_STATUS
#void HPDF_Free (HPDF_Doc pdf)
HPDF_Free=haru.HPDF_Free
HPDF_Free.restype=None
#HPDF_STATUS HPDF_NewDoc (HPDF_Doc pdf)
HPDF_NewDoc=haru.HPDF_NewDoc
HPDF_NewDoc.restype=HPDF_STATUS
#void HPDF_FreeDoc (HPDF_Doc pdf)
HPDF_FreeDoc=haru.HPDF_FreeDoc
HPDF_FreeDoc.restype=None
#HPDF_BOOL HPDF_HasDoc (HPDF_Doc pdf)
HPDF_HasDoc=haru.HPDF_HasDoc
HPDF_HasDoc.restype=HPDF_BOOL
#void HPDF_FreeDocAll (HPDF_Doc pdf)
HPDF_FreeDocAll=haru.HPDF_FreeDocAll
HPDF_FreeDocAll.restype=None
#HPDF_STATUS HPDF_SaveToStream (HPDF_Doc pdf)
HPDF_SaveToStream=haru.HPDF_SaveToStream
HPDF_SaveToStream.restype=HPDF_STATUS
#HPDF_UINT32 HPDF_GetStreamSize (HPDF_Doc pdf)
HPDF_GetStreamSize=haru.HPDF_GetStreamSize
HPDF_GetStreamSize.restype=HPDF_UINT32
#HPDF_STATUS HPDF_ReadFromStream (HPDF_Doc pdf, HPDF_BYTE *buf, HPDF_UINT32 *size)
_HPDF_ReadFromStream=haru.HPDF_ReadFromStream
_HPDF_ReadFromStream.restype=HPDF_STATUS
def HPDF_ReadFromStream(
    pdf, #HPDF_Doc
    buf, #POINTER(HPDF_BYTE)
    size, #POINTER(HPDF_UINT32)
    ):
    """Read document data back from the in-memory stream.

    If *buf* is a Python list/tuple of byte values it is converted to a
    ctypes HPDF_BYTE array and *size* is replaced by its length.
    (``types.ListType``/``types.TupleType`` were replaced by ``isinstance``:
    same Py2 behaviour, and the ``types`` constants no longer exist on Py3.)
    """
    if isinstance(buf, (list, tuple)):
        size = len(buf)
        buf = pointer((HPDF_BYTE * size)(*buf))
    size = HPDF_UINT32(int(size))
    # NOTE(review): the C signature takes HPDF_UINT32* as an in/out
    # parameter; passing the HPDF_UINT32 by value looks wrong -- byref(size)
    # is probably needed, and the updated size is not returned to the caller.
    # Preserved as-is; confirm against the libharu API reference.
    return _HPDF_ReadFromStream(
        pdf, #HPDF_Doc
        buf, #POINTER(HPDF_BYTE)
        size, #POINTER(HPDF_UINT32)
        )
# More direct bindings (restype only, no wrapper needed).
#HPDF_STATUS HPDF_ResetStream (HPDF_Doc pdf)
HPDF_ResetStream=haru.HPDF_ResetStream
HPDF_ResetStream.restype=HPDF_STATUS
#HPDF_STATUS HPDF_SaveToFile (HPDF_Doc pdf, const char *file_name)
HPDF_SaveToFile=haru.HPDF_SaveToFile
HPDF_SaveToFile.restype=HPDF_STATUS
#HPDF_STATUS HPDF_GetError (HPDF_Doc pdf)
HPDF_GetError=haru.HPDF_GetError
HPDF_GetError.restype=HPDF_STATUS
#HPDF_STATUS HPDF_GetErrorDetail (HPDF_Doc pdf)
HPDF_GetErrorDetail=haru.HPDF_GetErrorDetail
HPDF_GetErrorDetail.restype=HPDF_STATUS
#void HPDF_ResetError (HPDF_Doc pdf)
HPDF_ResetError=haru.HPDF_ResetError
HPDF_ResetError.restype=None
#HPDF_STATUS HPDF_SetPagesConfiguration (HPDF_Doc pdf, HPDF_UINT page_per_pages)
_HPDF_SetPagesConfiguration=haru.HPDF_SetPagesConfiguration
_HPDF_SetPagesConfiguration.restype=HPDF_STATUS
def HPDF_SetPagesConfiguration(pdf, page_per_pages):
    """Set pages-per-pages-node; *page_per_pages* is coerced to HPDF_UINT."""
    return _HPDF_SetPagesConfiguration(pdf, HPDF_UINT(int(page_per_pages)))
#HPDF_Page HPDF_GetPageByIndex (HPDF_Doc pdf, HPDF_UINT index)
HPDF_GetPageByIndex=haru.HPDF_GetPageByIndex
HPDF_GetPageByIndex.restype=HPDF_Page
#---------------------------------------------------------------------------
#---------------------------------------------------------------------------
# Viewer preference getters/setters.
#HPDF_PageLayout HPDF_GetPageLayout (HPDF_Doc pdf)
HPDF_GetPageLayout=haru.HPDF_GetPageLayout
HPDF_GetPageLayout.restype=HPDF_PageLayout
#HPDF_STATUS HPDF_SetPageLayout (HPDF_Doc pdf, HPDF_PageLayout layout)
HPDF_SetPageLayout=haru.HPDF_SetPageLayout
HPDF_SetPageLayout.restype=HPDF_STATUS
#HPDF_PageMode HPDF_GetPageMode (HPDF_Doc pdf)
HPDF_GetPageMode=haru.HPDF_GetPageMode
HPDF_GetPageMode.restype=HPDF_PageMode
#HPDF_STATUS HPDF_SetPageMode (HPDF_Doc pdf, HPDF_PageMode mode)
HPDF_SetPageMode=haru.HPDF_SetPageMode
HPDF_SetPageMode.restype=HPDF_STATUS
#HPDF_UINT HPDF_GetViewerPreference (HPDF_Doc pdf)
HPDF_GetViewerPreference=haru.HPDF_GetViewerPreference
HPDF_GetViewerPreference.restype=HPDF_UINT
#HPDF_STATUS HPDF_SetViewerPreference (HPDF_Doc pdf, HPDF_UINT value)
HPDF_SetViewerPreference=haru.HPDF_SetViewerPreference
HPDF_SetViewerPreference.restype=HPDF_STATUS
#HPDF_STATUS HPDF_SetOpenAction (HPDF_Doc pdf, HPDF_Destination open_action)
HPDF_SetOpenAction=haru.HPDF_SetOpenAction
HPDF_SetOpenAction.restype=HPDF_STATUS
#---------------------------------------------------------------------------
#----- page handling -------------------------------------------------------
#HPDF_Page HPDF_GetCurrentPage (HPDF_Doc pdf)
HPDF_GetCurrentPage=haru.HPDF_GetCurrentPage
HPDF_GetCurrentPage.restype=HPDF_Page
#HPDF_Page HPDF_AddPage (HPDF_Doc pdf)
HPDF_AddPage=haru.HPDF_AddPage
HPDF_AddPage.restype=HPDF_Page
#HPDF_Page HPDF_InsertPage (HPDF_Doc pdf, HPDF_Page page)
HPDF_InsertPage=haru.HPDF_InsertPage
HPDF_InsertPage.restype=HPDF_Page
#HPDF_STATUS HPDF_Page_SetWidth (HPDF_Page page, HPDF_REAL value)
_HPDF_Page_SetWidth=haru.HPDF_Page_SetWidth
_HPDF_Page_SetWidth.restype=HPDF_STATUS
def HPDF_Page_SetWidth(page, value):
    """Set the page width; *value* is converted to HPDF_REAL for the C call."""
    return _HPDF_Page_SetWidth(page, HPDF_REAL(value))
#HPDF_STATUS HPDF_Page_SetHeight (HPDF_Page page, HPDF_REAL value)
_HPDF_Page_SetHeight=haru.HPDF_Page_SetHeight
_HPDF_Page_SetHeight.restype=HPDF_STATUS
def HPDF_Page_SetHeight(page, value):
    """Set the page height; *value* is converted to HPDF_REAL for the C call."""
    return _HPDF_Page_SetHeight(page, HPDF_REAL(value))
#HPDF_STATUS
#HPDF_Page_SetSize  (HPDF_Page            page,
#                    HPDF_PageSizes       size,
#                    HPDF_PageDirection   direction);
# Direct binding; pass HPDF_PAGE_SIZE_* and HPDF_PAGE_* constants.
HPDF_Page_SetSize=haru.HPDF_Page_SetSize
HPDF_Page_SetSize.restype=HPDF_STATUS
#HPDF_STATUS HPDF_Page_SetRotate (HPDF_Page page, HPDF_UINT16 angle)
_HPDF_Page_SetRotate=haru.HPDF_Page_SetRotate
_HPDF_Page_SetRotate.restype=HPDF_STATUS
def HPDF_Page_SetRotate(page, angle):
    """Set the page rotation; *angle* is coerced to HPDF_UINT16."""
    return _HPDF_Page_SetRotate(page, HPDF_UINT16(int(angle)))
#---------------------------------------------------------------------------
#----- font handling -------------------------------------------------------
#HPDF_Font HPDF_GetFont (HPDF_Doc pdf, const char *font_name, const char *encoding_name)
HPDF_GetFont=haru.HPDF_GetFont
HPDF_GetFont.restype=HPDF_Font
# The font loaders return the registered font name as a C string.
#const char* HPDF_LoadType1FontFromFile (HPDF_Doc pdf, const char *afm_file_name, const char *data_file_name)
HPDF_LoadType1FontFromFile=haru.HPDF_LoadType1FontFromFile
HPDF_LoadType1FontFromFile.restype=c_char_p
#const char* HPDF_LoadTTFontFromFile (HPDF_Doc pdf, const char *file_name, HPDF_BOOL embedding)
HPDF_LoadTTFontFromFile=haru.HPDF_LoadTTFontFromFile
HPDF_LoadTTFontFromFile.restype=c_char_p
#const char* HPDF_LoadTTFontFromFile2 (HPDF_Doc pdf, const char *file_name, HPDF_UINT index, HPDF_BOOL embedding)
HPDF_LoadTTFontFromFile2=haru.HPDF_LoadTTFontFromFile2
HPDF_LoadTTFontFromFile2.restype=c_char_p
#HPDF_STATUS HPDF_AddPageLabel (HPDF_Doc pdf, HPDF_UINT page_num, HPDF_PageNumStyle style, HPDF_UINT first_page, const char *prefix)
_HPDF_AddPageLabel=haru.HPDF_AddPageLabel
_HPDF_AddPageLabel.restype=HPDF_STATUS
def HPDF_AddPageLabel(pdf, page_num, style, first_page, prefix):
    """Add a page-numbering range; the two counters are coerced to HPDF_UINT."""
    return _HPDF_AddPageLabel(
        pdf,
        HPDF_UINT(int(page_num)),
        style, #HPDF_PageNumStyle constant
        HPDF_UINT(int(first_page)),
        prefix, #c_char_p
        )
# Built-in CJK font registration helpers.
#HPDF_STATUS HPDF_UseJPFonts (HPDF_Doc pdf)
HPDF_UseJPFonts=haru.HPDF_UseJPFonts
HPDF_UseJPFonts.restype=HPDF_STATUS
#HPDF_STATUS HPDF_UseKRFonts (HPDF_Doc pdf)
HPDF_UseKRFonts=haru.HPDF_UseKRFonts
HPDF_UseKRFonts.restype=HPDF_STATUS
#HPDF_STATUS HPDF_UseCNSFonts (HPDF_Doc pdf)
HPDF_UseCNSFonts=haru.HPDF_UseCNSFonts
HPDF_UseCNSFonts.restype=HPDF_STATUS
#HPDF_STATUS HPDF_UseCNTFonts (HPDF_Doc pdf)
HPDF_UseCNTFonts=haru.HPDF_UseCNTFonts
HPDF_UseCNTFonts.restype=HPDF_STATUS
#--------------------------------------------------------------------------
#----- outline ------------------------------------------------------------
#HPDF_Outline HPDF_CreateOutline (HPDF_Doc pdf, HPDF_Outline parent, const char *title, HPDF_Encoder encoder)
HPDF_CreateOutline=haru.HPDF_CreateOutline
HPDF_CreateOutline.restype=HPDF_Outline
#HPDF_STATUS HPDF_Outline_SetOpened (HPDF_Outline outline, HPDF_BOOL opened)
HPDF_Outline_SetOpened=haru.HPDF_Outline_SetOpened
HPDF_Outline_SetOpened.restype=HPDF_STATUS
#HPDF_STATUS HPDF_Outline_SetDestination (HPDF_Outline outline, HPDF_Destination dst)
HPDF_Outline_SetDestination=haru.HPDF_Outline_SetDestination
HPDF_Outline_SetDestination.restype=HPDF_STATUS
#--------------------------------------------------------------------------
#----- destination --------------------------------------------------------
#HPDF_Destination HPDF_Page_CreateDestination (HPDF_Page page)
HPDF_Page_CreateDestination=haru.HPDF_Page_CreateDestination
HPDF_Page_CreateDestination.restype=HPDF_Destination
#HPDF_STATUS HPDF_Destination_SetXYZ (HPDF_Destination dst, HPDF_REAL left, HPDF_REAL top, HPDF_REAL zoom)
_HPDF_Destination_SetXYZ=haru.HPDF_Destination_SetXYZ
_HPDF_Destination_SetXYZ.restype=HPDF_STATUS
def HPDF_Destination_SetXYZ(dst, left, top, zoom):
    """XYZ destination: all three coordinates are converted to HPDF_REAL."""
    return _HPDF_Destination_SetXYZ(
        dst, HPDF_REAL(left), HPDF_REAL(top), HPDF_REAL(zoom))
#HPDF_STATUS HPDF_Destination_SetFit (HPDF_Destination dst)
# No arguments to convert, so no wrapper is needed.
HPDF_Destination_SetFit=haru.HPDF_Destination_SetFit
HPDF_Destination_SetFit.restype=HPDF_STATUS
#HPDF_STATUS HPDF_Destination_SetFitH (HPDF_Destination dst, HPDF_REAL top)
_HPDF_Destination_SetFitH=haru.HPDF_Destination_SetFitH
_HPDF_Destination_SetFitH.restype=HPDF_STATUS
def HPDF_Destination_SetFitH(dst, top):
    """FitH destination; *top* is converted to HPDF_REAL."""
    return _HPDF_Destination_SetFitH(dst, HPDF_REAL(top))
#HPDF_STATUS HPDF_Destination_SetFitV (HPDF_Destination dst, HPDF_REAL left)
_HPDF_Destination_SetFitV=haru.HPDF_Destination_SetFitV
_HPDF_Destination_SetFitV.restype=HPDF_STATUS
def HPDF_Destination_SetFitV(dst, left):
    """FitV destination; *left* is converted to HPDF_REAL."""
    return _HPDF_Destination_SetFitV(dst, HPDF_REAL(left))
#HPDF_STATUS HPDF_Destination_SetFitR (HPDF_Destination dst, HPDF_REAL left, HPDF_REAL bottom, HPDF_REAL right, HPDF_REAL top)
_HPDF_Destination_SetFitR=haru.HPDF_Destination_SetFitR
_HPDF_Destination_SetFitR.restype=HPDF_STATUS
def HPDF_Destination_SetFitR(dst, left, bottom, right, top):
    """FitR destination; all four rectangle edges are converted to HPDF_REAL."""
    return _HPDF_Destination_SetFitR(
        dst,
        HPDF_REAL(left),
        HPDF_REAL(bottom),
        HPDF_REAL(right),
        HPDF_REAL(top),
        )
#HPDF_STATUS HPDF_Destination_SetFitB (HPDF_Destination dst)
# No arguments to convert, so no wrapper is needed.
HPDF_Destination_SetFitB=haru.HPDF_Destination_SetFitB
HPDF_Destination_SetFitB.restype=HPDF_STATUS
#HPDF_STATUS HPDF_Destination_SetFitBH (HPDF_Destination dst, HPDF_REAL top)
_HPDF_Destination_SetFitBH=haru.HPDF_Destination_SetFitBH
_HPDF_Destination_SetFitBH.restype=HPDF_STATUS
def HPDF_Destination_SetFitBH(dst, top):
    """FitBH destination; *top* is converted to HPDF_REAL."""
    return _HPDF_Destination_SetFitBH(dst, HPDF_REAL(top))
#HPDF_STATUS HPDF_Destination_SetFitBV (HPDF_Destination dst, HPDF_REAL left)
_HPDF_Destination_SetFitBV=haru.HPDF_Destination_SetFitBV
_HPDF_Destination_SetFitBV.restype=HPDF_STATUS
def HPDF_Destination_SetFitBV(dst, left):
    """FitBV destination; *left* is converted to HPDF_REAL."""
    return _HPDF_Destination_SetFitBV(dst, HPDF_REAL(left))
#--------------------------------------------------------------------------
#----- encoder ------------------------------------------------------------
#HPDF_Encoder HPDF_GetEncoder (HPDF_Doc pdf, const char *encoding_name)
HPDF_GetEncoder=haru.HPDF_GetEncoder
HPDF_GetEncoder.restype=HPDF_Encoder
#HPDF_Encoder HPDF_GetCurrentEncoder (HPDF_Doc pdf)
HPDF_GetCurrentEncoder=haru.HPDF_GetCurrentEncoder
HPDF_GetCurrentEncoder.restype=HPDF_Encoder
#HPDF_STATUS HPDF_SetCurrentEncoder (HPDF_Doc pdf, const char *encoding_name)
HPDF_SetCurrentEncoder=haru.HPDF_SetCurrentEncoder
HPDF_SetCurrentEncoder.restype=HPDF_STATUS
#HPDF_EncoderType HPDF_Encoder_GetType (HPDF_Encoder encoder)
HPDF_Encoder_GetType=haru.HPDF_Encoder_GetType
HPDF_Encoder_GetType.restype=HPDF_EncoderType
#HPDF_ByteType HPDF_Encoder_GetByteType (HPDF_Encoder encoder, const char *text, HPDF_UINT index)
_HPDF_Encoder_GetByteType=haru.HPDF_Encoder_GetByteType
_HPDF_Encoder_GetByteType.restype=HPDF_ByteType
def HPDF_Encoder_GetByteType(
    encoder, #HPDF_Encoder
    text, #const char *
    index #HPDF_UINT
    ):
    """Classify the byte at *index* of *text* for this encoder.

    A list/tuple *text* is joined into a string first; if its elements are
    ints they are converted via chr().  Fixes: ``types.ListType`` etc. were
    replaced by ``isinstance`` (identical on Py2, required on Py3 where the
    ``types`` constants were removed), and an empty sequence no longer
    raises IndexError on ``text[-1]`` -- it becomes the empty string.
    """
    if isinstance(text, (list, tuple)):
        if text and not isinstance(text[-1], str):
            # sequence of character codes -> string
            text = ''.join(chr(i) for i in text)
        else:
            text = ''.join(text)
    return _HPDF_Encoder_GetByteType(
        encoder, #HPDF_Encoder
        text, #const char *
        index #HPDF_UINT
        )
#HPDF_UNICODE HPDF_Encoder_GetUnicode (HPDF_Encoder encoder, HPDF_UINT16 code)
HPDF_Encoder_GetUnicode=haru.HPDF_Encoder_GetUnicode
HPDF_Encoder_GetUnicode.restype=HPDF_UNICODE
#HPDF_WritingMode HPDF_Encoder_GetWritingMode (HPDF_Encoder encoder)
HPDF_Encoder_GetWritingMode=haru.HPDF_Encoder_GetWritingMode
HPDF_Encoder_GetWritingMode.restype=HPDF_WritingMode
# Built-in CJK encoding registration helpers.
#HPDF_STATUS HPDF_UseJPEncodings (HPDF_Doc pdf)
HPDF_UseJPEncodings=haru.HPDF_UseJPEncodings
HPDF_UseJPEncodings.restype=HPDF_STATUS
#HPDF_STATUS HPDF_UseKREncodings (HPDF_Doc pdf)
HPDF_UseKREncodings=haru.HPDF_UseKREncodings
HPDF_UseKREncodings.restype=HPDF_STATUS
#HPDF_STATUS HPDF_UseCNSEncodings (HPDF_Doc pdf)
HPDF_UseCNSEncodings=haru.HPDF_UseCNSEncodings
HPDF_UseCNSEncodings.restype=HPDF_STATUS
#HPDF_STATUS HPDF_UseCNTEncodings (HPDF_Doc pdf)
HPDF_UseCNTEncodings=haru.HPDF_UseCNTEncodings
HPDF_UseCNTEncodings.restype=HPDF_STATUS
#--------------------------------------------------------------------------
#----- annotation ---------------------------------------------------------
#HPDF_Annotation HPDF_Page_CreateTextAnnot (HPDF_Page page, HPDF_Rect rect, const char *text, HPDF_Encoder encoder)
HPDF_Page_CreateTextAnnot=haru.HPDF_Page_CreateTextAnnot
HPDF_Page_CreateTextAnnot.restype=HPDF_Annotation
#HPDF_Annotation HPDF_Page_CreateLinkAnnot (HPDF_Page page, HPDF_Rect rect, HPDF_Destination dst)
HPDF_Page_CreateLinkAnnot=haru.HPDF_Page_CreateLinkAnnot
HPDF_Page_CreateLinkAnnot.restype=HPDF_Annotation
#HPDF_Annotation HPDF_Page_CreateURILinkAnnot (HPDF_Page page, HPDF_Rect rect, const char *uri)
HPDF_Page_CreateURILinkAnnot=haru.HPDF_Page_CreateURILinkAnnot
HPDF_Page_CreateURILinkAnnot.restype=HPDF_Annotation
#HPDF_STATUS HPDF_LinkAnnot_SetHighlightMode (HPDF_Annotation annot, HPDF_AnnotHighlightMode mode)
HPDF_LinkAnnot_SetHighlightMode=haru.HPDF_LinkAnnot_SetHighlightMode
HPDF_LinkAnnot_SetHighlightMode.restype=HPDF_STATUS
#HPDF_STATUS HPDF_LinkAnnot_SetBorderStyle (HPDF_Annotation annot, HPDF_REAL width, HPDF_UINT16 dash_on, HPDF_UINT16 dash_off)
_HPDF_LinkAnnot_SetBorderStyle=haru.HPDF_LinkAnnot_SetBorderStyle
_HPDF_LinkAnnot_SetBorderStyle.restype=HPDF_STATUS
def HPDF_LinkAnnot_SetBorderStyle(annot, width, dash_on, dash_off):
    """Set a link annotation's border; width -> HPDF_REAL, dashes -> HPDF_UINT16."""
    return _HPDF_LinkAnnot_SetBorderStyle(
        annot,
        HPDF_REAL(width),
        HPDF_UINT16(dash_on),
        HPDF_UINT16(dash_off),
        )
#HPDF_STATUS HPDF_TextAnnot_SetIcon (HPDF_Annotation annot, HPDF_AnnotIcon icon)
HPDF_TextAnnot_SetIcon=haru.HPDF_TextAnnot_SetIcon
HPDF_TextAnnot_SetIcon.restype=HPDF_STATUS
#HPDF_STATUS HPDF_TextAnnot_SetOpened (HPDF_Annotation annot, HPDF_BOOL opened)
HPDF_TextAnnot_SetOpened=haru.HPDF_TextAnnot_SetOpened
HPDF_TextAnnot_SetOpened.restype=HPDF_STATUS
#--------------------------------------------------------------------------
#----- image data ---------------------------------------------------------
#HPDF_Image HPDF_LoadPngImageFromFile (HPDF_Doc pdf, const char *filename)
HPDF_LoadPngImageFromFile=haru.HPDF_LoadPngImageFromFile
HPDF_LoadPngImageFromFile.restype=HPDF_Image
#HPDF_Image HPDF_LoadPngImageFromFile2 (HPDF_Doc pdf, const char *filename)
HPDF_LoadPngImageFromFile2=haru.HPDF_LoadPngImageFromFile2
HPDF_LoadPngImageFromFile2.restype=HPDF_Image
#HPDF_Image HPDF_LoadJpegImageFromFile (HPDF_Doc pdf, const char *filename)
HPDF_LoadJpegImageFromFile=haru.HPDF_LoadJpegImageFromFile
HPDF_LoadJpegImageFromFile.restype=HPDF_Image
#HPDF_Image HPDF_LoadRawImageFromFile (HPDF_Doc pdf, const char *filename, HPDF_UINT width, HPDF_UINT height, HPDF_ColorSpace color_space)
_HPDF_LoadRawImageFromFile=haru.HPDF_LoadRawImageFromFile
_HPDF_LoadRawImageFromFile.restype=HPDF_Image
def HPDF_LoadRawImageFromFile(pdf, filename, width, height, color_space):
    """Load raw image data from a file; width/height are coerced to HPDF_UINT."""
    return _HPDF_LoadRawImageFromFile(
        pdf,
        filename, #c_char_p
        HPDF_UINT(width),
        HPDF_UINT(height),
        color_space, #HPDF_ColorSpace constant
        )
#HPDF_Image HPDF_LoadRawImageFromMem (HPDF_Doc pdf, const HPDF_BYTE *buf, HPDF_UINT width, HPDF_UINT height, HPDF_ColorSpace color_space, HPDF_UINT bits_per_component)
_HPDF_LoadRawImageFromMem=haru.HPDF_LoadRawImageFromMem
_HPDF_LoadRawImageFromMem.restype=HPDF_Image
def HPDF_LoadRawImageFromMem(
    pdf, #HPDF_Doc
    buf, #POINTER(HPDF_BYTE)
    width, #HPDF_UINT
    height, #HPDF_UINT
    color_space, #HPDF_ColorSpace
    bits_per_component, #HPDF_UINT
    ):
    """Load raw image data from memory.

    A list/tuple *buf* is converted to a ctypes HPDF_BYTE array; in that case
    a 0/None *height* is derived from the buffer length and *width*.
    Fixes: ``types.ListType``/``types.TupleType`` replaced by ``isinstance``
    (required on Py3); the height derivation was moved inside the sequence
    branch so the untyped-buffer path no longer hits an undefined ``size``
    (previously a NameError); integer floor division keeps the derived
    height an int on Py3 (identical result on Py2).
    """
    if isinstance(buf, (list, tuple)):
        size = len(buf)
        buf = pointer((HPDF_BYTE * size)(*buf))
        if height in (0, None):
            height = size // width
    width = HPDF_UINT(width)
    height = HPDF_UINT(height)
    bits_per_component = HPDF_UINT(bits_per_component)
    return _HPDF_LoadRawImageFromMem(
        pdf, #HPDF_Doc
        buf, #POINTER(HPDF_BYTE)
        width, #HPDF_UINT
        height, #HPDF_UINT
        color_space, #HPDF_ColorSpace
        bits_per_component, #HPDF_UINT
        )
#HPDF_Point HPDF_Image_GetSize (HPDF_Image image)
HPDF_Image_GetSize=haru.HPDF_Image_GetSize
HPDF_Image_GetSize.restype=HPDF_Point
#HPDF_STATUS HPDF_Image_GetSize2 (HPDF_Image image, HPDF_Point *size)
_HPDF_Image_GetSize2=haru.HPDF_Image_GetSize2
_HPDF_Image_GetSize2.restype=HPDF_STATUS
def HPDF_Image_GetSize2(
    image, #HPDF_Image
    size=None, #ignored; kept for backward compatibility with existing callers
    ):
    """Return (status, width, height) of the image.

    Allocates a fresh HPDF_Point and lets the C function fill it in.
    """
    # BUG FIX: was 'size=HPDF_Point' -- that binds the *class*, not an
    # instance, so size.x/size.y below could never hold the result.
    size=HPDF_Point()
    # BUG FIX: the C API takes HPDF_Point*; pass byref(size) instead of the
    # struct itself (which ctypes would pass by value, leaving size unfilled).
    ret= _HPDF_Image_GetSize2(
        image, #HPDF_Image
        byref(size), #POINTER(HPDF_Point)
        )
    return ret, size.x, size.y
# Image attribute getters: direct bindings, only restype declared.
#HPDF_UINT HPDF_Image_GetWidth (HPDF_Image image)
HPDF_Image_GetWidth=haru.HPDF_Image_GetWidth
HPDF_Image_GetWidth.restype=HPDF_UINT
#HPDF_UINT HPDF_Image_GetHeight (HPDF_Image image)
HPDF_Image_GetHeight=haru.HPDF_Image_GetHeight
HPDF_Image_GetHeight.restype=HPDF_UINT
#HPDF_UINT HPDF_Image_GetBitsPerComponent (HPDF_Image image)
HPDF_Image_GetBitsPerComponent=haru.HPDF_Image_GetBitsPerComponent
HPDF_Image_GetBitsPerComponent.restype=HPDF_UINT
#const char* HPDF_Image_GetColorSpace (HPDF_Image image)
HPDF_Image_GetColorSpace=haru.HPDF_Image_GetColorSpace
HPDF_Image_GetColorSpace.restype=c_char_p
#HPDF_STATUS HPDF_Image_SetColorMask (HPDF_Image image, HPDF_UINT rmin, HPDF_UINT rmax, HPDF_UINT gmin, HPDF_UINT gmax, HPDF_UINT bmin, HPDF_UINT bmax)
_HPDF_Image_SetColorMask=haru.HPDF_Image_SetColorMask
_HPDF_Image_SetColorMask.restype=HPDF_STATUS
def HPDF_Image_SetColorMask(
    image, #HPDF_Image
    rmin, #HPDF_UINT
    rmax, #HPDF_UINT
    gmin, #HPDF_UINT
    gmax, #HPDF_UINT
    bmin, #HPDF_UINT
    bmax, #HPDF_UINT
    ):
    """Wrapper for haru.HPDF_Image_SetColorMask.

    Coerces the six channel bounds (r/g/b min and max) to HPDF_UINT.
    """
    bounds=[HPDF_UINT(v) for v in (rmin, rmax, gmin, gmax, bmin, bmax)]
    return _HPDF_Image_SetColorMask(image, *bounds)
#HPDF_STATUS HPDF_Image_SetMaskImage (HPDF_Image image, HPDF_Image mask_image)
HPDF_Image_SetMaskImage=haru.HPDF_Image_SetMaskImage
HPDF_Image_SetMaskImage.restype=HPDF_STATUS
#--------------------------------------------------------------------------
#----- info dictionary ----------------------------------------------------
# Document information dictionary accessors: direct bindings.
#HPDF_STATUS HPDF_SetInfoAttr (HPDF_Doc pdf, HPDF_InfoType type, const char *value)
HPDF_SetInfoAttr=haru.HPDF_SetInfoAttr
HPDF_SetInfoAttr.restype=HPDF_STATUS
#const char* HPDF_GetInfoAttr (HPDF_Doc pdf, HPDF_InfoType type)
HPDF_GetInfoAttr=haru.HPDF_GetInfoAttr
HPDF_GetInfoAttr.restype=c_char_p
#HPDF_STATUS HPDF_SetInfoDateAttr (HPDF_Doc pdf, HPDF_InfoType type, HPDF_Date value)
HPDF_SetInfoDateAttr=haru.HPDF_SetInfoDateAttr
HPDF_SetInfoDateAttr.restype=HPDF_STATUS
#--------------------------------------------------------------------------
#----- encryption ---------------------------------------------------------
#HPDF_STATUS HPDF_SetPassword (HPDF_Doc pdf, const char *owner_passwd, const char *user_passwd)
HPDF_SetPassword=haru.HPDF_SetPassword
HPDF_SetPassword.restype=HPDF_STATUS
#HPDF_STATUS HPDF_SetPermission (HPDF_Doc pdf, HPDF_UINT permission)
_HPDF_SetPermission=haru.HPDF_SetPermission
_HPDF_SetPermission.restype=HPDF_STATUS
def HPDF_SetPermission(
    pdf, #HPDF_Doc
    permission, #HPDF_UINT (permission flag bits)
    ):
    """Wrapper for haru.HPDF_SetPermission.

    Forces permission through int() and then to HPDF_UINT before the call.
    """
    return _HPDF_SetPermission(pdf, HPDF_UINT(int(permission)))
#HPDF_STATUS HPDF_SetEncryptionMode (HPDF_Doc pdf, HPDF_EncryptMode mode, HPDF_UINT key_len)
_HPDF_SetEncryptionMode=haru.HPDF_SetEncryptionMode
_HPDF_SetEncryptionMode.restype=HPDF_STATUS
def HPDF_SetEncryptionMode(
    pdf, #HPDF_Doc
    mode, #HPDF_EncryptMode
    key_len, #HPDF_UINT
    ):
    """Wrapper for haru.HPDF_SetEncryptionMode.

    Forces key_len through int() and then to HPDF_UINT; pdf and mode are
    passed through unchanged.
    """
    return _HPDF_SetEncryptionMode(pdf, mode, HPDF_UINT(int(key_len)))
#--------------------------------------------------------------------------
#----- compression --------------------------------------------------------
#HPDF_STATUS HPDF_SetCompressionMode (HPDF_Doc pdf, HPDF_UINT mode)
HPDF_SetCompressionMode=haru.HPDF_SetCompressionMode
HPDF_SetCompressionMode.restype=HPDF_STATUS
#--------------------------------------------------------------------------
#----- font ---------------------------------------------------------------
# Font attribute getters: direct bindings, only restype declared.
#const char* HPDF_Font_GetFontName (HPDF_Font font)
HPDF_Font_GetFontName=haru.HPDF_Font_GetFontName
HPDF_Font_GetFontName.restype=c_char_p
#const char* HPDF_Font_GetEncodingName (HPDF_Font font)
HPDF_Font_GetEncodingName=haru.HPDF_Font_GetEncodingName
HPDF_Font_GetEncodingName.restype=c_char_p
#HPDF_INT HPDF_Font_GetUnicodeWidth (HPDF_Font font, HPDF_UNICODE code)
HPDF_Font_GetUnicodeWidth=haru.HPDF_Font_GetUnicodeWidth
HPDF_Font_GetUnicodeWidth.restype=HPDF_INT
#HPDF_Box HPDF_Font_GetBBox (HPDF_Font font)
HPDF_Font_GetBBox=haru.HPDF_Font_GetBBox
HPDF_Font_GetBBox.restype=HPDF_Box
#HPDF_INT HPDF_Font_GetAscent (HPDF_Font font)
HPDF_Font_GetAscent=haru.HPDF_Font_GetAscent
HPDF_Font_GetAscent.restype=HPDF_INT
#HPDF_INT HPDF_Font_GetDescent (HPDF_Font font)
HPDF_Font_GetDescent=haru.HPDF_Font_GetDescent
HPDF_Font_GetDescent.restype=HPDF_INT
#HPDF_UINT HPDF_Font_GetXHeight (HPDF_Font font)
HPDF_Font_GetXHeight=haru.HPDF_Font_GetXHeight
HPDF_Font_GetXHeight.restype=HPDF_UINT
#HPDF_UINT HPDF_Font_GetCapHeight (HPDF_Font font)
HPDF_Font_GetCapHeight=haru.HPDF_Font_GetCapHeight
HPDF_Font_GetCapHeight.restype=HPDF_UINT
#HPDF_TextWidth HPDF_Font_TextWidth (HPDF_Font font, const HPDF_BYTE *text, HPDF_UINT len)
HPDF_Font_TextWidth=haru.HPDF_Font_TextWidth
HPDF_Font_TextWidth.restype=HPDF_TextWidth
#HPDF_UINT HPDF_Font_MeasureText (HPDF_Font font, const HPDF_BYTE *text, HPDF_UINT len, HPDF_REAL width, HPDF_REAL font_size, HPDF_REAL char_space, HPDF_REAL word_space, HPDF_BOOL wordwrap, HPDF_REAL *real_width)
_HPDF_Font_MeasureText=haru.HPDF_Font_MeasureText
_HPDF_Font_MeasureText.restype=HPDF_UINT
def HPDF_Font_MeasureText(
    font, #HPDF_Font
    text, #POINTER(HPDF_BYTE), or a list/tuple of byte values
    length, #HPDF_UINT; recomputed from len(text) when text is a sequence
    width, #HPDF_REAL
    font_size, #HPDF_REAL
    char_space, #HPDF_REAL
    word_space, #HPDF_REAL
    wordwrap, #HPDF_BOOL
    real_width, #POINTER(HPDF_REAL) per the C prototype (see NOTE below)
    ):
    """Measure text for the given font, wrapping haru.HPDF_Font_MeasureText.

    A list/tuple text argument is packed into an HPDF_BYTE array and length
    is recomputed.  All float parameters are coerced to HPDF_REAL.

    NOTE(review): the C API declares real_width as an *output* pointer
    (HPDF_REAL*), but this wrapper converts it to a plain HPDF_REAL value
    and never returns it, so the measured width cannot reach the caller --
    verify whether byref + returning real_width.value was intended.
    """
    if type(text) in (types.TupleType, types.ListType):
        length=len(text)
        text=pointer((HPDF_BYTE*length)(*text))
    length=HPDF_UINT(int(length))
    width=HPDF_REAL(width)
    font_size=HPDF_REAL(font_size)
    char_space=HPDF_REAL(char_space)
    word_space=HPDF_REAL(word_space)
    real_width=HPDF_REAL(real_width)
    return _HPDF_Font_MeasureText(
        font, #HPDF_Font
        text, #POINTER(HPDF_BYTE)
        length, #HPDF_UINT
        width, #HPDF_REAL
        font_size, #HPDF_REAL
        char_space, #HPDF_REAL
        word_space, #HPDF_REAL
        wordwrap, #HPDF_BOOL
        real_width, #POINTER(HPDF_REAL)
        )
#--------------------------------------------------------------------------
#----- extended graphics state --------------------------------------------
#HPDF_ExtGState HPDF_CreateExtGState (HPDF_Doc pdf)
HPDF_CreateExtGState=haru.HPDF_CreateExtGState
HPDF_CreateExtGState.restype=HPDF_ExtGState
#HPDF_STATUS HPDF_ExtGState_SetAlphaStroke (HPDF_ExtGState ext_gstate, HPDF_REAL value)
_HPDF_ExtGState_SetAlphaStroke=haru.HPDF_ExtGState_SetAlphaStroke
_HPDF_ExtGState_SetAlphaStroke.restype=HPDF_STATUS
def HPDF_ExtGState_SetAlphaStroke(
    ext_gstate, #HPDF_ExtGState
    value, #HPDF_REAL
    ):
    """Wrapper for haru.HPDF_ExtGState_SetAlphaStroke; coerces value to HPDF_REAL."""
    return _HPDF_ExtGState_SetAlphaStroke(ext_gstate, HPDF_REAL(value))
#HPDF_STATUS HPDF_ExtGState_SetAlphaFill (HPDF_ExtGState ext_gstate, HPDF_REAL value)
_HPDF_ExtGState_SetAlphaFill=haru.HPDF_ExtGState_SetAlphaFill
_HPDF_ExtGState_SetAlphaFill.restype=HPDF_STATUS
def HPDF_ExtGState_SetAlphaFill(
    ext_gstate, #HPDF_ExtGState
    value, #HPDF_REAL
    ):
    """Wrapper for haru.HPDF_ExtGState_SetAlphaFill; coerces value to HPDF_REAL."""
    return _HPDF_ExtGState_SetAlphaFill(ext_gstate, HPDF_REAL(value))
#HPDF_STATUS HPDF_ExtGState_SetBlendMode (HPDF_ExtGState ext_gstate, HPDF_BlendMode mode)
HPDF_ExtGState_SetBlendMode=haru.HPDF_ExtGState_SetBlendMode
HPDF_ExtGState_SetBlendMode.restype=HPDF_STATUS
#--------------------------------------------------------------------------
#--------------------------------------------------------------------------
#HPDF_REAL HPDF_Page_TextWidth (HPDF_Page page, const char *text)
_HPDF_Page_TextWidth=haru.HPDF_Page_TextWidth
_HPDF_Page_TextWidth.restype=HPDF_REAL
def HPDF_Page_TextWidth(
    page, #HPDF_Page
    text, #c_char_p, or a list/tuple of characters or byte values
    ):
    """Return the width of text on the page (wraps haru.HPDF_Page_TextWidth).

    A list/tuple is joined into a string first; integer elements are mapped
    through chr().  Only the last element's type is inspected, so the
    sequence is assumed to be homogeneous.
    """
    if type(text) in (types.ListType, types.TupleType):
        # BUG FIX: guard with len(text) so an empty sequence no longer
        # raises IndexError on text[-1]; it now joins to '' as expected.
        if len(text) and type(text[-1]) != types.StringType:
            text=[chr(i) for i in text]
        text=''.join(text)
    return _HPDF_Page_TextWidth(
        page, #HPDF_Page
        text, #c_char_p
        )
#HPDF_UINT HPDF_Page_MeasureText (HPDF_Page page, const char *text, HPDF_REAL width, HPDF_BOOL wordwrap, HPDF_REAL *real_width)
_HPDF_Page_MeasureText=haru.HPDF_Page_MeasureText
_HPDF_Page_MeasureText.restype=HPDF_UINT
def HPDF_Page_MeasureText(
    page, #HPDF_Page
    text, #c_char_p
    width, #HPDF_REAL
    wordwrap, #HPDF_BOOL
    real_width, #POINTER(HPDF_REAL) per the C prototype (see NOTE below)
    ):
    """Wrapper for haru.HPDF_Page_MeasureText; coerces floats to HPDF_REAL.

    NOTE(review): real_width is an output pointer in the C API, but it is
    converted to a plain HPDF_REAL value here and never returned -- verify
    whether byref + returning real_width.value was intended.
    """
    width=HPDF_REAL(width)
    real_width=HPDF_REAL(real_width)
    return _HPDF_Page_MeasureText(
        page, #HPDF_Page
        text, #c_char_p
        width, #HPDF_REAL
        wordwrap, #HPDF_BOOL
        real_width, #POINTER(HPDF_REAL)
        )
# Page geometry / state getters: direct bindings, only restype declared.
#HPDF_REAL
#HPDF_Page_GetWidth (HPDF_Page page);
HPDF_Page_GetWidth=haru.HPDF_Page_GetWidth
HPDF_Page_GetWidth.restype=HPDF_REAL
#HPDF_REAL HPDF_Page_GetHeight (HPDF_Page page)
HPDF_Page_GetHeight=haru.HPDF_Page_GetHeight
HPDF_Page_GetHeight.restype=HPDF_REAL
#HPDF_UINT16 HPDF_Page_GetGMode (HPDF_Page page)
HPDF_Page_GetGMode=haru.HPDF_Page_GetGMode
HPDF_Page_GetGMode.restype=HPDF_UINT16
#HPDF_Point HPDF_Page_GetCurrentPos (HPDF_Page page)
HPDF_Page_GetCurrentPos=haru.HPDF_Page_GetCurrentPos
HPDF_Page_GetCurrentPos.restype=HPDF_Point
#HPDF_STATUS HPDF_Page_GetCurrentPos2 (HPDF_Page page, HPDF_Point *pos)
_HPDF_Page_GetCurrentPos2=haru.HPDF_Page_GetCurrentPos2
_HPDF_Page_GetCurrentPos2.restype=HPDF_STATUS
def HPDF_Page_GetCurrentPos2(
    page, #HPDF_Page
    pos=None, #ignored; kept for backward compatibility with existing callers
    ):
    """Return (status, x, y): the current path position of the page."""
    pos=HPDF_Point()
    # BUG FIX: the C API takes HPDF_Point*; pass byref(pos) instead of the
    # struct itself (ctypes would pass a by-value copy, leaving pos unfilled).
    ret= _HPDF_Page_GetCurrentPos2(
        page, #HPDF_Page
        byref(pos), #POINTER(HPDF_Point)
        )
    return ret, pos.x, pos.y
#HPDF_Point HPDF_Page_GetCurrentTextPos (HPDF_Page page)
HPDF_Page_GetCurrentTextPos=haru.HPDF_Page_GetCurrentTextPos
HPDF_Page_GetCurrentTextPos.restype=HPDF_Point
#HPDF_STATUS HPDF_Page_GetCurrentTextPos2 (HPDF_Page page, HPDF_Point *pos)
_HPDF_Page_GetCurrentTextPos2=haru.HPDF_Page_GetCurrentTextPos2
_HPDF_Page_GetCurrentTextPos2.restype=HPDF_STATUS
def HPDF_Page_GetCurrentTextPos2(
    page, #HPDF_Page
    pos=None, #ignored; kept for backward compatibility with existing callers
    ):
    """Return (status, x, y): the current text position of the page."""
    pos=HPDF_Point()
    # BUG FIX: the C API takes HPDF_Point*; pass byref(pos) instead of the
    # struct itself (ctypes would pass a by-value copy, leaving pos unfilled).
    ret= _HPDF_Page_GetCurrentTextPos2(
        page, #HPDF_Page
        byref(pos), #POINTER(HPDF_Point)
        )
    return ret, pos.x, pos.y
# Page graphics/text-state getters: direct bindings, only restype declared.
#HPDF_Font HPDF_Page_GetCurrentFont (HPDF_Page page)
HPDF_Page_GetCurrentFont=haru.HPDF_Page_GetCurrentFont
HPDF_Page_GetCurrentFont.restype=HPDF_Font
#HPDF_REAL HPDF_Page_GetCurrentFontSize (HPDF_Page page)
HPDF_Page_GetCurrentFontSize=haru.HPDF_Page_GetCurrentFontSize
HPDF_Page_GetCurrentFontSize.restype=HPDF_REAL
#HPDF_TransMatrix HPDF_Page_GetTransMatrix (HPDF_Page page)
HPDF_Page_GetTransMatrix=haru.HPDF_Page_GetTransMatrix
HPDF_Page_GetTransMatrix.restype=HPDF_TransMatrix
#HPDF_REAL HPDF_Page_GetLineWidth (HPDF_Page page)
HPDF_Page_GetLineWidth=haru.HPDF_Page_GetLineWidth
HPDF_Page_GetLineWidth.restype=HPDF_REAL
#HPDF_LineCap HPDF_Page_GetLineCap (HPDF_Page page)
HPDF_Page_GetLineCap=haru.HPDF_Page_GetLineCap
HPDF_Page_GetLineCap.restype=HPDF_LineCap
#HPDF_LineJoin HPDF_Page_GetLineJoin (HPDF_Page page)
HPDF_Page_GetLineJoin=haru.HPDF_Page_GetLineJoin
HPDF_Page_GetLineJoin.restype=HPDF_LineJoin
#HPDF_REAL HPDF_Page_GetMiterLimit (HPDF_Page page)
HPDF_Page_GetMiterLimit=haru.HPDF_Page_GetMiterLimit
HPDF_Page_GetMiterLimit.restype=HPDF_REAL
#HPDF_DashMode HPDF_Page_GetDash (HPDF_Page page)
HPDF_Page_GetDash=haru.HPDF_Page_GetDash
HPDF_Page_GetDash.restype=HPDF_DashMode
#HPDF_REAL HPDF_Page_GetFlat (HPDF_Page page)
HPDF_Page_GetFlat=haru.HPDF_Page_GetFlat
HPDF_Page_GetFlat.restype=HPDF_REAL
#HPDF_REAL HPDF_Page_GetCharSpace (HPDF_Page page)
HPDF_Page_GetCharSpace=haru.HPDF_Page_GetCharSpace
HPDF_Page_GetCharSpace.restype=HPDF_REAL
#HPDF_REAL HPDF_Page_GetWordSpace (HPDF_Page page)
HPDF_Page_GetWordSpace=haru.HPDF_Page_GetWordSpace
HPDF_Page_GetWordSpace.restype=HPDF_REAL
# Note: "Scalling" is the spelling used by the libharu C API itself.
#HPDF_REAL HPDF_Page_GetHorizontalScalling (HPDF_Page page)
HPDF_Page_GetHorizontalScalling=haru.HPDF_Page_GetHorizontalScalling
HPDF_Page_GetHorizontalScalling.restype=HPDF_REAL
#HPDF_REAL HPDF_Page_GetTextLeading (HPDF_Page page)
HPDF_Page_GetTextLeading=haru.HPDF_Page_GetTextLeading
HPDF_Page_GetTextLeading.restype=HPDF_REAL
#HPDF_TextRenderingMode HPDF_Page_GetTextRenderingMode (HPDF_Page page)
HPDF_Page_GetTextRenderingMode=haru.HPDF_Page_GetTextRenderingMode
HPDF_Page_GetTextRenderingMode.restype=HPDF_TextRenderingMode
# This function is obsolete. Use HPDF_Page_GetTextRise.
#HPDF_REAL HPDF_Page_GetTextRaise (HPDF_Page page)
HPDF_Page_GetTextRaise=haru.HPDF_Page_GetTextRaise
HPDF_Page_GetTextRaise.restype=HPDF_REAL
#HPDF_REAL HPDF_Page_GetTextRise (HPDF_Page page)
HPDF_Page_GetTextRise=haru.HPDF_Page_GetTextRise
HPDF_Page_GetTextRise.restype=HPDF_REAL
#HPDF_RGBColor HPDF_Page_GetRGBFill (HPDF_Page page)
HPDF_Page_GetRGBFill=haru.HPDF_Page_GetRGBFill
HPDF_Page_GetRGBFill.restype=HPDF_RGBColor
#HPDF_RGBColor HPDF_Page_GetRGBStroke (HPDF_Page page)
HPDF_Page_GetRGBStroke=haru.HPDF_Page_GetRGBStroke
HPDF_Page_GetRGBStroke.restype=HPDF_RGBColor
#HPDF_CMYKColor HPDF_Page_GetCMYKFill (HPDF_Page page)
HPDF_Page_GetCMYKFill=haru.HPDF_Page_GetCMYKFill
HPDF_Page_GetCMYKFill.restype=HPDF_CMYKColor
#HPDF_CMYKColor HPDF_Page_GetCMYKStroke (HPDF_Page page)
HPDF_Page_GetCMYKStroke=haru.HPDF_Page_GetCMYKStroke
HPDF_Page_GetCMYKStroke.restype=HPDF_CMYKColor
#HPDF_REAL HPDF_Page_GetGrayFill (HPDF_Page page)
HPDF_Page_GetGrayFill=haru.HPDF_Page_GetGrayFill
HPDF_Page_GetGrayFill.restype=HPDF_REAL
#HPDF_REAL HPDF_Page_GetGrayStroke (HPDF_Page page)
HPDF_Page_GetGrayStroke=haru.HPDF_Page_GetGrayStroke
HPDF_Page_GetGrayStroke.restype=HPDF_REAL
#HPDF_ColorSpace HPDF_Page_GetStrokingColorSpace (HPDF_Page page)
HPDF_Page_GetStrokingColorSpace=haru.HPDF_Page_GetStrokingColorSpace
HPDF_Page_GetStrokingColorSpace.restype=HPDF_ColorSpace
#HPDF_ColorSpace HPDF_Page_GetFillingColorSpace (HPDF_Page page)
HPDF_Page_GetFillingColorSpace=haru.HPDF_Page_GetFillingColorSpace
HPDF_Page_GetFillingColorSpace.restype=HPDF_ColorSpace
#HPDF_TransMatrix HPDF_Page_GetTextMatrix (HPDF_Page page)
HPDF_Page_GetTextMatrix=haru.HPDF_Page_GetTextMatrix
HPDF_Page_GetTextMatrix.restype=HPDF_TransMatrix
#HPDF_UINT HPDF_Page_GetGStateDepth (HPDF_Page page)
HPDF_Page_GetGStateDepth=haru.HPDF_Page_GetGStateDepth
HPDF_Page_GetGStateDepth.restype=HPDF_UINT
#--------------------------------------------------------------------------
#----- GRAPHICS OPERATORS -------------------------------------------------
#--- General graphics state ---------------------------------------------
# w
#HPDF_STATUS HPDF_Page_SetLineWidth (HPDF_Page page, HPDF_REAL line_width)
_HPDF_Page_SetLineWidth=haru.HPDF_Page_SetLineWidth
_HPDF_Page_SetLineWidth.restype=HPDF_STATUS
def HPDF_Page_SetLineWidth(
    page, #HPDF_Page
    line_width, #HPDF_REAL
    ):
    """Wrapper for haru.HPDF_Page_SetLineWidth (PDF operator 'w'); coerces line_width to HPDF_REAL."""
    return _HPDF_Page_SetLineWidth(page, HPDF_REAL(line_width))
# J
#HPDF_STATUS HPDF_Page_SetLineCap (HPDF_Page page, HPDF_LineCap line_cap)
HPDF_Page_SetLineCap=haru.HPDF_Page_SetLineCap
HPDF_Page_SetLineCap.restype=HPDF_STATUS
# j
#HPDF_STATUS HPDF_Page_SetLineJoin (HPDF_Page page, HPDF_LineJoin line_join)
HPDF_Page_SetLineJoin=haru.HPDF_Page_SetLineJoin
HPDF_Page_SetLineJoin.restype=HPDF_STATUS
# M
#HPDF_STATUS HPDF_Page_SetMiterLimit (HPDF_Page page, HPDF_REAL miter_limit)
_HPDF_Page_SetMiterLimit=haru.HPDF_Page_SetMiterLimit
_HPDF_Page_SetMiterLimit.restype=HPDF_STATUS
def HPDF_Page_SetMiterLimit(
    page, #HPDF_Page
    miter_limit, #HPDF_REAL
    ):
    """Wrapper for haru.HPDF_Page_SetMiterLimit (PDF operator 'M'); coerces miter_limit to HPDF_REAL."""
    return _HPDF_Page_SetMiterLimit(page, HPDF_REAL(miter_limit))
# d
#HPDF_STATUS HPDF_Page_SetDash (HPDF_Page page, const HPDF_UINT16 *dash_ptn, HPDF_UINT num_param, HPDF_UINT phase)
_HPDF_Page_SetDash=haru.HPDF_Page_SetDash
_HPDF_Page_SetDash.restype=HPDF_STATUS
def HPDF_Page_SetDash(
    page, #HPDF_Page
    dash_ptn, #POINTER(HPDF_UINT16), or a list/tuple of pattern values
    num_param, #HPDF_UINT; recomputed from len(dash_ptn) for sequences
    phase, #HPDF_UINT
    ):
    """Wrapper for haru.HPDF_Page_SetDash (PDF operator 'd').

    A list/tuple dash pattern is packed into an HPDF_UINT16 array and
    num_param is recomputed from its length.
    """
    if type(dash_ptn) in (types.ListType, types.TupleType):
        num_param=len(dash_ptn)
        dash_ptn=pointer((HPDF_UINT16*num_param)(*dash_ptn))
    return _HPDF_Page_SetDash(page, dash_ptn, num_param, phase)
# ri --not implemented yet
# i
#HPDF_STATUS HPDF_Page_SetFlat (HPDF_Page page, HPDF_REAL flatness)
_HPDF_Page_SetFlat=haru.HPDF_Page_SetFlat
_HPDF_Page_SetFlat.restype=HPDF_STATUS
def HPDF_Page_SetFlat(
    page, #HPDF_Page
    flatness, #HPDF_REAL
    ):
    """Wrapper for haru.HPDF_Page_SetFlat (PDF operator 'i'); coerces flatness to HPDF_REAL."""
    return _HPDF_Page_SetFlat(page, HPDF_REAL(flatness))
# gs
#HPDF_STATUS HPDF_Page_SetExtGState (HPDF_Page page, HPDF_ExtGState ext_gstate)
HPDF_Page_SetExtGState=haru.HPDF_Page_SetExtGState
HPDF_Page_SetExtGState.restype=HPDF_STATUS
#--- Special graphic state operator --------------------------------------
# q
#HPDF_STATUS HPDF_Page_GSave (HPDF_Page page)
HPDF_Page_GSave=haru.HPDF_Page_GSave
HPDF_Page_GSave.restype=HPDF_STATUS
# Q
#HPDF_STATUS HPDF_Page_GRestore (HPDF_Page page)
HPDF_Page_GRestore=haru.HPDF_Page_GRestore
HPDF_Page_GRestore.restype=HPDF_STATUS
# cm
#HPDF_STATUS HPDF_Page_Concat (HPDF_Page page, HPDF_REAL a, HPDF_REAL b, HPDF_REAL c, HPDF_REAL d, HPDF_REAL x, HPDF_REAL y)
_HPDF_Page_Concat=haru.HPDF_Page_Concat
_HPDF_Page_Concat.restype=HPDF_STATUS
def HPDF_Page_Concat(
    page, #HPDF_Page
    a, #HPDF_REAL
    b, #HPDF_REAL
    c, #HPDF_REAL
    d, #HPDF_REAL
    x, #HPDF_REAL
    y, #HPDF_REAL
    ):
    """Wrapper for haru.HPDF_Page_Concat (PDF operator 'cm').

    Coerces the six matrix coefficients (a, b, c, d, x, y) to HPDF_REAL.
    """
    coefs=[HPDF_REAL(v) for v in (a, b, c, d, x, y)]
    return _HPDF_Page_Concat(page, *coefs)
#--- Path construction operator ------------------------------------------
# m
#HPDF_STATUS HPDF_Page_MoveTo (HPDF_Page page, HPDF_REAL x, HPDF_REAL y)
_HPDF_Page_MoveTo=haru.HPDF_Page_MoveTo
_HPDF_Page_MoveTo.restype=HPDF_STATUS
def HPDF_Page_MoveTo(
    page, #HPDF_Page
    x, #HPDF_REAL
    y, #HPDF_REAL
    ):
    """Wrapper for haru.HPDF_Page_MoveTo (PDF operator 'm'); coerces x and y to HPDF_REAL."""
    return _HPDF_Page_MoveTo(page, HPDF_REAL(x), HPDF_REAL(y))
# l
#HPDF_STATUS HPDF_Page_LineTo (HPDF_Page page, HPDF_REAL x, HPDF_REAL y)
_HPDF_Page_LineTo=haru.HPDF_Page_LineTo
_HPDF_Page_LineTo.restype=HPDF_STATUS
def HPDF_Page_LineTo(
    page, #HPDF_Page
    x, #HPDF_REAL
    y, #HPDF_REAL
    ):
    """Wrapper for haru.HPDF_Page_LineTo (PDF operator 'l'); coerces x and y to HPDF_REAL."""
    return _HPDF_Page_LineTo(page, HPDF_REAL(x), HPDF_REAL(y))
# c
#HPDF_STATUS HPDF_Page_CurveTo (HPDF_Page page, HPDF_REAL x1, HPDF_REAL y1, HPDF_REAL x2, HPDF_REAL y2, HPDF_REAL x3, HPDF_REAL y3)
_HPDF_Page_CurveTo=haru.HPDF_Page_CurveTo
_HPDF_Page_CurveTo.restype=HPDF_STATUS
def HPDF_Page_CurveTo(
    page, #HPDF_Page
    x1, #HPDF_REAL
    y1, #HPDF_REAL
    x2, #HPDF_REAL
    y2, #HPDF_REAL
    x3, #HPDF_REAL
    y3, #HPDF_REAL
    ):
    """Wrapper for haru.HPDF_Page_CurveTo (PDF operator 'c').

    Coerces the three control-point coordinate pairs to HPDF_REAL.
    """
    coords=[HPDF_REAL(v) for v in (x1, y1, x2, y2, x3, y3)]
    return _HPDF_Page_CurveTo(page, *coords)
# v
#HPDF_STATUS HPDF_Page_CurveTo2 (HPDF_Page page, HPDF_REAL x2, HPDF_REAL y2, HPDF_REAL x3, HPDF_REAL y3)
_HPDF_Page_CurveTo2=haru.HPDF_Page_CurveTo2
_HPDF_Page_CurveTo2.restype=HPDF_STATUS
def HPDF_Page_CurveTo2(
    page, #HPDF_Page
    x2, #HPDF_REAL
    y2, #HPDF_REAL
    x3, #HPDF_REAL
    y3, #HPDF_REAL
    ):
    """Wrapper for haru.HPDF_Page_CurveTo2 (PDF operator 'v').

    Coerces the two coordinate pairs to HPDF_REAL.
    """
    coords=[HPDF_REAL(v) for v in (x2, y2, x3, y3)]
    return _HPDF_Page_CurveTo2(page, *coords)
# y
#HPDF_STATUS HPDF_Page_CurveTo3 (HPDF_Page page, HPDF_REAL x1, HPDF_REAL y1, HPDF_REAL x3, HPDF_REAL y3)
_HPDF_Page_CurveTo3=haru.HPDF_Page_CurveTo3
_HPDF_Page_CurveTo3.restype=HPDF_STATUS
def HPDF_Page_CurveTo3(
    page, #HPDF_Page
    x1, #HPDF_REAL
    y1, #HPDF_REAL
    x3, #HPDF_REAL
    y3, #HPDF_REAL
    ):
    """Wrapper for haru.HPDF_Page_CurveTo3 (PDF operator 'y').

    Coerces the two coordinate pairs to HPDF_REAL.
    """
    coords=[HPDF_REAL(v) for v in (x1, y1, x3, y3)]
    return _HPDF_Page_CurveTo3(page, *coords)
# h
#HPDF_STATUS HPDF_Page_ClosePath (HPDF_Page page)
HPDF_Page_ClosePath=haru.HPDF_Page_ClosePath
HPDF_Page_ClosePath.restype=HPDF_STATUS
# re
#HPDF_STATUS HPDF_Page_Rectangle (HPDF_Page page, HPDF_REAL x, HPDF_REAL y, HPDF_REAL width, HPDF_REAL height)
_HPDF_Page_Rectangle=haru.HPDF_Page_Rectangle
_HPDF_Page_Rectangle.restype=HPDF_STATUS
def HPDF_Page_Rectangle(
    page, #HPDF_Page
    x, #HPDF_REAL
    y, #HPDF_REAL
    width, #HPDF_REAL
    height, #HPDF_REAL
    ):
    """Wrapper for haru.HPDF_Page_Rectangle (PDF operator 're').

    Coerces x, y, width and height to HPDF_REAL.
    """
    args=[HPDF_REAL(v) for v in (x, y, width, height)]
    return _HPDF_Page_Rectangle(page, *args)
#--- Path painting operator ---------------------------------------------
# S
#HPDF_STATUS HPDF_Page_Stroke (HPDF_Page page)
_HPDF_Page_Stroke=haru.HPDF_Page_Stroke
_HPDF_Page_Stroke.restype=HPDF_STATUS
def HPDF_Page_Stroke(
    page, #HPDF_Page
    ):
    """Wrapper for haru.HPDF_Page_Stroke (PDF operator 'S'); no argument conversion needed."""
    return _HPDF_Page_Stroke(page)
# Path painting / clipping / text-object operators: direct bindings.
# s
#HPDF_STATUS HPDF_Page_ClosePathStroke (HPDF_Page page)
HPDF_Page_ClosePathStroke=haru.HPDF_Page_ClosePathStroke
HPDF_Page_ClosePathStroke.restype=HPDF_STATUS
# f
#HPDF_STATUS HPDF_Page_Fill (HPDF_Page page)
HPDF_Page_Fill=haru.HPDF_Page_Fill
HPDF_Page_Fill.restype=HPDF_STATUS
# f*
#HPDF_STATUS HPDF_Page_Eofill (HPDF_Page page)
HPDF_Page_Eofill=haru.HPDF_Page_Eofill
HPDF_Page_Eofill.restype=HPDF_STATUS
# B
#HPDF_STATUS HPDF_Page_FillStroke (HPDF_Page page)
HPDF_Page_FillStroke=haru.HPDF_Page_FillStroke
HPDF_Page_FillStroke.restype=HPDF_STATUS
# B*
#HPDF_STATUS HPDF_Page_EofillStroke (HPDF_Page page)
HPDF_Page_EofillStroke=haru.HPDF_Page_EofillStroke
HPDF_Page_EofillStroke.restype=HPDF_STATUS
# b
#HPDF_STATUS HPDF_Page_ClosePathFillStroke (HPDF_Page page)
HPDF_Page_ClosePathFillStroke=haru.HPDF_Page_ClosePathFillStroke
HPDF_Page_ClosePathFillStroke.restype=HPDF_STATUS
# b*
#HPDF_STATUS HPDF_Page_ClosePathEofillStroke (HPDF_Page page)
HPDF_Page_ClosePathEofillStroke=haru.HPDF_Page_ClosePathEofillStroke
HPDF_Page_ClosePathEofillStroke.restype=HPDF_STATUS
# n
#HPDF_STATUS HPDF_Page_EndPath (HPDF_Page page)
HPDF_Page_EndPath=haru.HPDF_Page_EndPath
HPDF_Page_EndPath.restype=HPDF_STATUS
#--- Clipping paths operator --------------------------------------------
# W
#HPDF_STATUS HPDF_Page_Clip (HPDF_Page page)
HPDF_Page_Clip=haru.HPDF_Page_Clip
HPDF_Page_Clip.restype=HPDF_STATUS
# W*
#HPDF_STATUS HPDF_Page_Eoclip (HPDF_Page page)
HPDF_Page_Eoclip=haru.HPDF_Page_Eoclip
HPDF_Page_Eoclip.restype=HPDF_STATUS
#--- Text object operator -----------------------------------------------
# BT
#HPDF_STATUS HPDF_Page_BeginText (HPDF_Page page)
HPDF_Page_BeginText=haru.HPDF_Page_BeginText
HPDF_Page_BeginText.restype=HPDF_STATUS
# ET
#HPDF_STATUS HPDF_Page_EndText (HPDF_Page page)
HPDF_Page_EndText=haru.HPDF_Page_EndText
HPDF_Page_EndText.restype=HPDF_STATUS
#--- Text state ---------------------------------------------------------
# Tc
#HPDF_STATUS HPDF_Page_SetCharSpace (HPDF_Page page, HPDF_REAL value)
_HPDF_Page_SetCharSpace=haru.HPDF_Page_SetCharSpace
_HPDF_Page_SetCharSpace.restype=HPDF_STATUS
def HPDF_Page_SetCharSpace(
    page, #HPDF_Page
    value, #HPDF_REAL
    ):
    """Wrapper for haru.HPDF_Page_SetCharSpace (PDF operator 'Tc'); coerces value to HPDF_REAL."""
    return _HPDF_Page_SetCharSpace(page, HPDF_REAL(value))
# Tw
#HPDF_STATUS HPDF_Page_SetWordSpace (HPDF_Page page, HPDF_REAL value)
_HPDF_Page_SetWordSpace=haru.HPDF_Page_SetWordSpace
_HPDF_Page_SetWordSpace.restype=HPDF_STATUS
def HPDF_Page_SetWordSpace(
    page, #HPDF_Page
    value, #HPDF_REAL
    ):
    """Wrapper for haru.HPDF_Page_SetWordSpace (PDF operator 'Tw'); coerces value to HPDF_REAL."""
    return _HPDF_Page_SetWordSpace(page, HPDF_REAL(value))
# Tz
#HPDF_STATUS HPDF_Page_SetHorizontalScalling (HPDF_Page page, HPDF_REAL value)
_HPDF_Page_SetHorizontalScalling=haru.HPDF_Page_SetHorizontalScalling
_HPDF_Page_SetHorizontalScalling.restype=HPDF_STATUS
def HPDF_Page_SetHorizontalScalling(
    page, #HPDF_Page
    value, #HPDF_REAL
    ):
    """Wrapper for haru.HPDF_Page_SetHorizontalScalling (PDF operator 'Tz'); coerces value to HPDF_REAL.

    ("Scalling" is the spelling used by the libharu C API itself.)
    """
    return _HPDF_Page_SetHorizontalScalling(page, HPDF_REAL(value))
# TL
#HPDF_STATUS HPDF_Page_SetTextLeading (HPDF_Page page, HPDF_REAL value)
_HPDF_Page_SetTextLeading=haru.HPDF_Page_SetTextLeading
_HPDF_Page_SetTextLeading.restype=HPDF_STATUS
def HPDF_Page_SetTextLeading(
    page, #HPDF_Page
    value, #HPDF_REAL
    ):
    """Wrapper for haru.HPDF_Page_SetTextLeading (PDF operator 'TL'); coerces value to HPDF_REAL."""
    return _HPDF_Page_SetTextLeading(page, HPDF_REAL(value))
# Tf
#HPDF_STATUS HPDF_Page_SetFontAndSize (HPDF_Page page, HPDF_Font font, HPDF_REAL size)
_HPDF_Page_SetFontAndSize=haru.HPDF_Page_SetFontAndSize
_HPDF_Page_SetFontAndSize.restype=HPDF_STATUS
def HPDF_Page_SetFontAndSize(
    page, #HPDF_Page
    font, #HPDF_Font
    size, #HPDF_REAL
    ):
    """Wrapper for haru.HPDF_Page_SetFontAndSize (PDF operator 'Tf'); coerces size to HPDF_REAL."""
    return _HPDF_Page_SetFontAndSize(page, font, HPDF_REAL(size))
# Tr
#HPDF_STATUS HPDF_Page_SetTextRenderingMode (HPDF_Page page, HPDF_TextRenderingMode mode)
HPDF_Page_SetTextRenderingMode=haru.HPDF_Page_SetTextRenderingMode
HPDF_Page_SetTextRenderingMode.restype=HPDF_STATUS
# Ts
#HPDF_STATUS HPDF_Page_SetTextRise (HPDF_Page page, HPDF_REAL value)
_HPDF_Page_SetTextRise=haru.HPDF_Page_SetTextRise
_HPDF_Page_SetTextRise.restype=HPDF_STATUS
def HPDF_Page_SetTextRise(
    page, #HPDF_Page
    value, #HPDF_REAL
    ):
    """Wrapper for haru.HPDF_Page_SetTextRise (PDF operator 'Ts'); coerces value to HPDF_REAL."""
    return _HPDF_Page_SetTextRise(page, HPDF_REAL(value))
# This function is obsolete. Use HPDF_Page_SetTextRise.
#HPDF_STATUS HPDF_Page_SetTextRaise (HPDF_Page page, HPDF_REAL value)
_HPDF_Page_SetTextRaise=haru.HPDF_Page_SetTextRaise
_HPDF_Page_SetTextRaise.restype=HPDF_STATUS
def HPDF_Page_SetTextRaise(
    page, #HPDF_Page
    value, #HPDF_REAL
    ):
    """Obsolete alias kept for compatibility -- use HPDF_Page_SetTextRise; coerces value to HPDF_REAL."""
    return _HPDF_Page_SetTextRaise(page, HPDF_REAL(value))
#--- Text positioning ---------------------------------------------------
# Td
#HPDF_STATUS HPDF_Page_MoveTextPos (HPDF_Page page, HPDF_REAL x, HPDF_REAL y)
_HPDF_Page_MoveTextPos=haru.HPDF_Page_MoveTextPos
_HPDF_Page_MoveTextPos.restype=HPDF_STATUS
def HPDF_Page_MoveTextPos(
    page, #HPDF_Page
    x, #HPDF_REAL
    y, #HPDF_REAL
    ):
    """Wrapper for haru.HPDF_Page_MoveTextPos (PDF operator 'Td'); coerces x and y to HPDF_REAL."""
    return _HPDF_Page_MoveTextPos(page, HPDF_REAL(x), HPDF_REAL(y))
# TD
#HPDF_STATUS HPDF_Page_MoveTextPos2 (HPDF_Page page, HPDF_REAL x, HPDF_REAL y)
_HPDF_Page_MoveTextPos2=haru.HPDF_Page_MoveTextPos2
_HPDF_Page_MoveTextPos2.restype=HPDF_STATUS
def HPDF_Page_MoveTextPos2(
    page, #HPDF_Page
    x, #HPDF_REAL
    y, #HPDF_REAL
    ):
    """Wrapper for haru.HPDF_Page_MoveTextPos2 (PDF operator 'TD'); coerces x and y to HPDF_REAL."""
    return _HPDF_Page_MoveTextPos2(page, HPDF_REAL(x), HPDF_REAL(y))
# Tm
#HPDF_STATUS HPDF_Page_SetTextMatrix (HPDF_Page page, HPDF_REAL a, HPDF_REAL b, HPDF_REAL c, HPDF_REAL d, HPDF_REAL x, HPDF_REAL y)
_HPDF_Page_SetTextMatrix=haru.HPDF_Page_SetTextMatrix
_HPDF_Page_SetTextMatrix.restype=HPDF_STATUS
def HPDF_Page_SetTextMatrix(
    page, #HPDF_Page
    a, #HPDF_REAL
    b, #HPDF_REAL
    c, #HPDF_REAL
    d, #HPDF_REAL
    x, #HPDF_REAL
    y, #HPDF_REAL
    ):
    """Wrapper for haru.HPDF_Page_SetTextMatrix (PDF operator 'Tm').

    Coerces the six matrix coefficients (a, b, c, d, x, y) to HPDF_REAL.
    """
    coefs=[HPDF_REAL(v) for v in (a, b, c, d, x, y)]
    return _HPDF_Page_SetTextMatrix(page, *coefs)
# T*
#HPDF_STATUS HPDF_Page_MoveToNextLine (HPDF_Page page)
HPDF_Page_MoveToNextLine=haru.HPDF_Page_MoveToNextLine
HPDF_Page_MoveToNextLine.restype=HPDF_STATUS
#--- Text showing -------------------------------------------------------
# Tj
#HPDF_STATUS HPDF_Page_ShowText (HPDF_Page page, const char *text)
_HPDF_Page_ShowText=haru.HPDF_Page_ShowText
_HPDF_Page_ShowText.restype=HPDF_STATUS
def HPDF_Page_ShowText(page,
                       text
                       ):
    """Show text on the page (wraps haru.HPDF_Page_ShowText, operator 'Tj').

    A list/tuple is joined into a string first; integer elements are mapped
    through chr().  Only the last element's type is inspected, so the
    sequence is assumed to be homogeneous.
    """
    if type(text) in (types.ListType, types.TupleType):
        # BUG FIX: guard with len(text) so an empty sequence no longer
        # raises IndexError on text[-1]; it now joins to '' as expected.
        if len(text) and type(text[-1]) != types.StringType:
            text=[chr(i) for i in text]
        text=''.join(text)
    return _HPDF_Page_ShowText(page,
                               text
                               )
# TJ
# '
#HPDF_STATUS HPDF_Page_ShowTextNextLine (HPDF_Page page, const char *text)
HPDF_Page_ShowTextNextLine=haru.HPDF_Page_ShowTextNextLine
HPDF_Page_ShowTextNextLine.restype=HPDF_STATUS
# "
#HPDF_STATUS HPDF_Page_ShowTextNextLineEx (HPDF_Page page, HPDF_REAL word_space, HPDF_REAL char_space, const char *text)
_HPDF_Page_ShowTextNextLineEx=haru.HPDF_Page_ShowTextNextLineEx
_HPDF_Page_ShowTextNextLineEx.restype=HPDF_STATUS
def HPDF_Page_ShowTextNextLineEx(
page, #HPDF_Page
word_space, #HPDF_REAL
char_space, #HPDF_REAL
text, #c_char_p
):
word_space=HPDF_REAL(word_space)
char_space=HPDF_REAL(char_space)
return _HPDF_Page_ShowTextNextLineEx(
page, #HPDF_Page
word_space, #HPDF_REAL
char_space, #HPDF_REAL
text, #c_char_p
)
#--- Color showing ------------------------------------------------------
# cs --not implemented yet
# CS --not implemented yet
# sc --not implemented yet
# scn --not implemented yet
# SC --not implemented yet
# SCN --not implemented yet
# g
#HPDF_STATUS HPDF_Page_SetGrayFill (HPDF_Page page, HPDF_REAL gray)
_HPDF_Page_SetGrayFill=haru.HPDF_Page_SetGrayFill
_HPDF_Page_SetGrayFill.restype=HPDF_STATUS
def HPDF_Page_SetGrayFill(
    page, #HPDF_Page
    gray, #HPDF_REAL
    ):
    """Wrapper for haru.HPDF_Page_SetGrayFill (PDF operator 'g'); coerces gray to HPDF_REAL."""
    return _HPDF_Page_SetGrayFill(page, HPDF_REAL(gray))
# G
#HPDF_STATUS HPDF_Page_SetGrayStroke (HPDF_Page page, HPDF_REAL gray)
_HPDF_Page_SetGrayStroke=haru.HPDF_Page_SetGrayStroke
_HPDF_Page_SetGrayStroke.restype=HPDF_STATUS
def HPDF_Page_SetGrayStroke(
    page, #HPDF_Page
    gray, #HPDF_REAL
    ):
    """Wrapper for haru.HPDF_Page_SetGrayStroke (PDF operator 'G'); coerces gray to HPDF_REAL."""
    return _HPDF_Page_SetGrayStroke(page, HPDF_REAL(gray))
# rg
#HPDF_STATUS HPDF_Page_SetRGBFill (HPDF_Page page, HPDF_REAL r, HPDF_REAL g, HPDF_REAL b)
_HPDF_Page_SetRGBFill=haru.HPDF_Page_SetRGBFill
_HPDF_Page_SetRGBFill.restype=HPDF_STATUS
def HPDF_Page_SetRGBFill(
    page, #HPDF_Page
    r, #HPDF_REAL
    g, #HPDF_REAL
    b, #HPDF_REAL
    ):
    """Wrapper for haru.HPDF_Page_SetRGBFill (PDF operator 'rg').

    Coerces the r, g, b components to HPDF_REAL.
    """
    channels=[HPDF_REAL(v) for v in (r, g, b)]
    return _HPDF_Page_SetRGBFill(page, *channels)
# RG
#HPDF_STATUS HPDF_Page_SetRGBStroke (HPDF_Page page, HPDF_REAL r, HPDF_REAL g, HPDF_REAL b)
_HPDF_Page_SetRGBStroke=haru.HPDF_Page_SetRGBStroke
_HPDF_Page_SetRGBStroke.restype=HPDF_STATUS
def HPDF_Page_SetRGBStroke(
    page, #HPDF_Page
    r, #HPDF_REAL
    g, #HPDF_REAL
    b, #HPDF_REAL
    ):
    """Wrapper for haru.HPDF_Page_SetRGBStroke (PDF operator 'RG').

    Coerces the r, g, b components to HPDF_REAL.
    """
    channels=[HPDF_REAL(v) for v in (r, g, b)]
    return _HPDF_Page_SetRGBStroke(page, *channels)
# k
#HPDF_STATUS HPDF_Page_SetCMYKFill (HPDF_Page page, HPDF_REAL c, HPDF_REAL m, HPDF_REAL y, HPDF_REAL k)
_HPDF_Page_SetCMYKFill=haru.HPDF_Page_SetCMYKFill
_HPDF_Page_SetCMYKFill.restype=HPDF_STATUS
def HPDF_Page_SetCMYKFill(
    page, #HPDF_Page
    c, #HPDF_REAL
    m, #HPDF_REAL
    y, #HPDF_REAL
    k, #HPDF_REAL
    ):
    """Wrapper for haru.HPDF_Page_SetCMYKFill (PDF operator 'k').

    Coerces the c, m, y, k components to HPDF_REAL.
    """
    channels=[HPDF_REAL(v) for v in (c, m, y, k)]
    return _HPDF_Page_SetCMYKFill(page, *channels)
# K
#HPDF_STATUS HPDF_Page_SetCMYKStroke (HPDF_Page page, HPDF_REAL c, HPDF_REAL m, HPDF_REAL y, HPDF_REAL k)
_HPDF_Page_SetCMYKStroke=haru.HPDF_Page_SetCMYKStroke
_HPDF_Page_SetCMYKStroke.restype=HPDF_STATUS
def HPDF_Page_SetCMYKStroke(
    page, #HPDF_Page
    c, #HPDF_REAL
    m, #HPDF_REAL
    y, #HPDF_REAL
    k, #HPDF_REAL
    ):
    """Wrapper for haru.HPDF_Page_SetCMYKStroke (PDF operator 'K').

    Coerces the c, m, y, k components to HPDF_REAL.
    """
    channels=[HPDF_REAL(v) for v in (c, m, y, k)]
    return _HPDF_Page_SetCMYKStroke(page, *channels)
#--- Shading patterns ---------------------------------------------------
# sh --not implemented yet
#--- In-line images -----------------------------------------------------
# BI --not implemented yet
# ID --not implemented yet
# EI --not implemented yet
#--- XObjects -----------------------------------------------------------
# Do
#HPDF_STATUS HPDF_Page_ExecuteXObject (HPDF_Page page, HPDF_XObject obj)
HPDF_Page_ExecuteXObject=haru.HPDF_Page_ExecuteXObject
HPDF_Page_ExecuteXObject.restype=HPDF_STATUS
#--- Marked content -----------------------------------------------------
# BMC --not implemented yet
# BDC --not implemented yet
# EMC --not implemented yet
# MP --not implemented yet
# DP --not implemented yet
#--- Compatibility ------------------------------------------------------
# BX --not implemented yet
# EX --not implemented yet
#HPDF_STATUS HPDF_Page_DrawImage (HPDF_Page page, HPDF_Image image, HPDF_REAL x, HPDF_REAL y, HPDF_REAL width, HPDF_REAL height)
_HPDF_Page_DrawImage=haru.HPDF_Page_DrawImage
_HPDF_Page_DrawImage.restype=HPDF_STATUS
def HPDF_Page_DrawImage(
    page, #HPDF_Page
    image, #HPDF_Image
    x, #HPDF_REAL
    y, #HPDF_REAL
    width, #HPDF_REAL
    height, #HPDF_REAL
    ):
    """Wrapper for haru.HPDF_Page_DrawImage.

    Coerces x, y, width and height to HPDF_REAL; page and image pass through.
    """
    rect=[HPDF_REAL(v) for v in (x, y, width, height)]
    return _HPDF_Page_DrawImage(page, image, *rect)
#HPDF_STATUS HPDF_Page_Circle (HPDF_Page page, HPDF_REAL x, HPDF_REAL y, HPDF_REAL ray)
_HPDF_Page_Circle = haru.HPDF_Page_Circle
_HPDF_Page_Circle.restype = HPDF_STATUS

def HPDF_Page_Circle(page, x, y, ray):
    """Append a circle of radius *ray* centred at (x, y) to *page*'s path.

    All numeric arguments are coerced to HPDF_REAL.
    Returns an HPDF_STATUS code.
    """
    return _HPDF_Page_Circle(page, HPDF_REAL(x), HPDF_REAL(y), HPDF_REAL(ray))
#HPDF_STATUS HPDF_Page_Ellipse (HPDF_Page page, HPDF_REAL x, HPDF_REAL y, HPDF_REAL xray, HPDF_REAL yray)
_HPDF_Page_Ellipse = haru.HPDF_Page_Ellipse
_HPDF_Page_Ellipse.restype = HPDF_STATUS

def HPDF_Page_Ellipse(page, x, y, xray, yray):
    """Append an ellipse centred at (x, y) with radii (xray, yray).

    All numeric arguments are coerced to HPDF_REAL.
    Returns an HPDF_STATUS code.
    """
    return _HPDF_Page_Ellipse(
        page, HPDF_REAL(x), HPDF_REAL(y), HPDF_REAL(xray), HPDF_REAL(yray))
#HPDF_STATUS HPDF_Page_Arc (HPDF_Page page, HPDF_REAL x, HPDF_REAL y, HPDF_REAL ray, HPDF_REAL ang1, HPDF_REAL ang2)
_HPDF_Page_Arc = haru.HPDF_Page_Arc
_HPDF_Page_Arc.restype = HPDF_STATUS

def HPDF_Page_Arc(page, x, y, ray, ang1, ang2):
    """Append an arc centred at (x, y) with radius *ray* spanning the
    angles ang1..ang2.

    All numeric arguments are coerced to HPDF_REAL.
    Returns an HPDF_STATUS code.
    """
    return _HPDF_Page_Arc(
        page, HPDF_REAL(x), HPDF_REAL(y), HPDF_REAL(ray),
        HPDF_REAL(ang1), HPDF_REAL(ang2))
#HPDF_STATUS HPDF_Page_TextOut (HPDF_Page page, HPDF_REAL xpos, HPDF_REAL ypos, const char *text)
_HPDF_Page_TextOut=haru.HPDF_Page_TextOut
_HPDF_Page_TextOut.restype=HPDF_STATUS
def HPDF_Page_TextOut(
    page, #HPDF_Page
    xpos, #HPDF_REAL
    ypos, #HPDF_REAL
    text, #c_char_p
    ):
    """Print *text* at position (xpos, ypos) on *page*.

    text may be a string, or a list/tuple of characters or integer
    character codes, which is flattened to a string first.
    Returns an HPDF_STATUS code.

    Fixed: uses isinstance() instead of the Python-2-only ``types``
    module constants, and no longer raises IndexError for an empty
    list/tuple (the old code inspected ``text[-1]``).
    """
    xpos=HPDF_REAL(xpos)
    ypos=HPDF_REAL(ypos)
    if isinstance(text, (list, tuple)):
        # Accept either integer character codes or 1-char strings.
        text = ''.join(chr(i) if isinstance(i, int) else i for i in text)
    return _HPDF_Page_TextOut(
        page, #HPDF_Page
        xpos, #HPDF_REAL
        ypos, #HPDF_REAL
        text, #c_char_p
        )
#HPDF_STATUS HPDF_Page_TextRect (HPDF_Page page, HPDF_REAL left, HPDF_REAL top, HPDF_REAL right, HPDF_REAL bottom, const char *text, HPDF_TextAlignment align, HPDF_UINT *len)
_HPDF_Page_TextRect=haru.HPDF_Page_TextRect
_HPDF_Page_TextRect.restype=HPDF_STATUS
def HPDF_Page_TextRect(
    page, #HPDF_Page
    left, #HPDF_REAL
    top, #HPDF_REAL
    right, #HPDF_REAL
    bottom, #HPDF_REAL
    text, #c_char_p
    align, #HPDF_TextAlignment
    length, #POINTER(HPDF_UINT)
    ):
    """Lay *text* out inside the rectangle (left, top, right, bottom).

    length is an out-parameter; it may be passed as a POINTER(HPDF_UINT)
    or as a list/tuple of ints, which is converted to a ctypes array
    pointer here.
    Returns an HPDF_STATUS code.

    Fixed: replaced the Python-2-only ``types.ListType``/``TupleType``
    check with isinstance() so the wrapper also runs on Python 3.
    """
    left=HPDF_REAL(left)
    top=HPDF_REAL(top)
    right=HPDF_REAL(right)
    bottom=HPDF_REAL(bottom)
    if isinstance(length, (list, tuple)):
        length = pointer((HPDF_UINT * len(length))(*length))
    return _HPDF_Page_TextRect(
        page, #HPDF_Page
        left, #HPDF_REAL
        top, #HPDF_REAL
        right, #HPDF_REAL
        bottom, #HPDF_REAL
        text, #c_char_p
        align, #HPDF_TextAlignment
        length, #POINTER(HPDF_UINT)
        )
#HPDF_STATUS HPDF_Page_SetSlideShow (HPDF_Page page, HPDF_TransitionStyle type, HPDF_REAL disp_time, HPDF_REAL trans_time)
_HPDF_Page_SetSlideShow = haru.HPDF_Page_SetSlideShow
_HPDF_Page_SetSlideShow.restype = HPDF_STATUS

def HPDF_Page_SetSlideShow(page, tType, disp_time, trans_time):
    """Configure slide-show behaviour for *page*.

    page       -- HPDF_Page handle
    tType      -- HPDF_TransitionStyle (passed through unchanged;
                  named tType because 'type' is a Python builtin)
    disp_time  -- display time, coerced to HPDF_REAL
    trans_time -- transition time, coerced to HPDF_REAL
    Returns an HPDF_STATUS code.
    """
    return _HPDF_Page_SetSlideShow(
        page, tType, HPDF_REAL(disp_time), HPDF_REAL(trans_time))
# C-style NULL used by the demo scripts for optional pointer arguments.
NULL=0
# Set True when libhpdf was built without libpng support; the PNG demo
# checks this flag and aborts with a warning.
HPDF_NOPNGLIB=False
### ----------------------------------------------------------------------
###
## * << Haru Free PDF Library 2.0.0 >> -- png_demo.c
## *
## * Copyright (c) 1999-2006 Takeshi Kanno <takeshi_kanno@est.hi-ho.ne.jp>
## *
## * Permission to use, copy, modify, distribute and sell this software
## * and its documentation for any purpose is hereby granted without fee,
## * provided that the above copyright notice appear in all copies and
## * that both that copyright notice and this permission notice appear
## * in supporting documentation.
## * It is provided "as is" without express or implied warranty.
## *
##
## port to python by Li Jun
## http://groups.google.com/group/pythoncia
import os, sys
from ctypes import *
up=2
def setlibpath(up):
    """Append the directory *up* levels above this file to sys.path.

    Fixed: the original built the path with the literal '\\..', which is
    only a path separator on Windows; os.path.join with '..' components
    is portable.
    """
    import sys
    base = os.path.split(os.path.realpath(__file__))[0]
    path = os.path.normpath(os.path.join(base, *(['..'] * up)))
    if path not in sys.path:
        sys.path.append(path)
setlibpath(up)
from haru import *
from haru.c_func import *
from haru.hpdf_errorcode import *
@HPDF_Error_Handler(None, HPDF_UINT, HPDF_UINT, c_void_p)
def error_handler (error_no, detail_no, user_data):
    """libharu error callback: report the error, free the document, exit.

    error_no/detail_no are HPDF_UINT codes; user_data is unused.
    """
    global pdf
    # Map the numeric code to its symbolic name for the message.
    printf ("ERROR: %s, detail_no=%u\n", error_detail[error_no],
        detail_no)
    HPDF_Free (pdf)  # release the document before aborting
    sys.exit(1)
def draw_image (pdf, filename, x, y, text):
    """Load pngsuite/<filename>, draw it at (x, y) on the current page,
    and caption it with the file name and *text* below the image."""
    page = HPDF_GetCurrentPage (pdf)
    filename1= "pngsuite/%s" % filename
    image = HPDF_LoadPngImageFromFile (pdf, filename1)
    # Draw image to the canvas at its natural pixel size.
    HPDF_Page_DrawImage (page, image, x, y, HPDF_Image_GetWidth (image),
        HPDF_Image_GetHeight (image))
    # Print the text.
    HPDF_Page_BeginText (page)
    HPDF_Page_SetTextLeading (page, 16)
    HPDF_Page_MoveTextPos (page, x, y)
    HPDF_Page_ShowTextNextLine (page, filename)
    HPDF_Page_ShowTextNextLine (page, text)
    HPDF_Page_EndText (page)
def main():
    """Build png_demo.pdf: one page showing each pngsuite sample image
    (grayscale / colour / palette / alpha at various bit depths), each
    captioned via draw_image(). Returns 0 on success, 1 on error."""
    global pdf
    # Output file: this script's path with the extension swapped to .pdf.
    fname=os.path.realpath(sys.argv[0])
    fname=fname[:fname.rfind('.')]+'.pdf'
    pdf = HPDF_New (error_handler, NULL)
    if (not pdf):
        printf ("error: cannot create PdfDoc object\n")
        return 1
    HPDF_SetCompressionMode (pdf, HPDF_COMP_ALL)
    # create default-font
    font = HPDF_GetFont (pdf, "Helvetica", NULL)
    # add a new page object.
    page = HPDF_AddPage (pdf)
    HPDF_Page_SetWidth (page, 550)
    HPDF_Page_SetHeight (page, 650)
    # Open the document scrolled to the top of this page.
    dst = HPDF_Page_CreateDestination (page)
    HPDF_Destination_SetXYZ (dst, 0, HPDF_Page_GetHeight (page), 1)
    HPDF_SetOpenAction(pdf, dst)
    # Page title.
    HPDF_Page_BeginText (page)
    HPDF_Page_SetFontAndSize (page, font, 20)
    HPDF_Page_MoveTextPos (page, 220, HPDF_Page_GetHeight (page) - 70)
    HPDF_Page_ShowText (page, "PngDemo")
    HPDF_Page_EndText (page)
    HPDF_Page_SetFontAndSize (page, font, 12)
    # One sample per pngsuite file, laid out in rows by pixel format.
    draw_image (pdf, "basn0g01.png", 100, HPDF_Page_GetHeight (page) - 150,
        "1bit grayscale.")
    draw_image (pdf, "basn0g02.png", 200, HPDF_Page_GetHeight (page) - 150,
        "2bit grayscale.")
    draw_image (pdf, "basn0g04.png", 300, HPDF_Page_GetHeight (page) - 150,
        "4bit grayscale.")
    draw_image (pdf, "basn0g08.png", 400, HPDF_Page_GetHeight (page) - 150,
        "8bit grayscale.")
    draw_image (pdf, "basn2c08.png", 100, HPDF_Page_GetHeight (page) - 250,
        "8bit color.")
    draw_image (pdf, "basn2c16.png", 200, HPDF_Page_GetHeight (page) - 250,
        "16bit color.")
    draw_image (pdf, "basn3p01.png", 100, HPDF_Page_GetHeight (page) - 350,
        "1bit pallet.")
    draw_image (pdf, "basn3p02.png", 200, HPDF_Page_GetHeight (page) - 350,
        "2bit pallet.")
    draw_image (pdf, "basn3p04.png", 300, HPDF_Page_GetHeight (page) - 350,
        "4bit pallet.")
    draw_image (pdf, "basn3p08.png", 400, HPDF_Page_GetHeight (page) - 350,
        "8bit pallet.")
    draw_image (pdf, "basn4a08.png", 100, HPDF_Page_GetHeight (page) - 450,
        "8bit alpha.")
    draw_image (pdf, "basn4a16.png", 200, HPDF_Page_GetHeight (page) - 450,
        "16bit alpha.")
    draw_image (pdf, "basn6a08.png", 100, HPDF_Page_GetHeight (page) - 550,
        "8bit alpha.")
    draw_image (pdf, "basn6a16.png", 200, HPDF_Page_GetHeight (page) - 550,
        "16bit alpha.")
    # save the document to a file
    HPDF_SaveToFile (pdf, fname)
    # clean up
    HPDF_Free (pdf)
    return 0
# Entry point (removed a stray '| Python |' table artifact fused onto the
# last line by the extraction).
if HPDF_NOPNGLIB:
    # libhpdf was built without PNG support: this demo cannot run.
    printf("WARNING: if you want to run this demo, \n"
           "make libhpdf with HPDF_USE_PNGLIB option.\n")
    sys.exit(1)
else:
    main()
###
## * << Haru Free PDF Library 2.0.0 >> -- permission.c
## *
## * Copyright (c) 1999-2006 Takeshi Kanno <takeshi_kanno@est.hi-ho.ne.jp>
## *
## * Permission to use, copy, modify, distribute and sell this software
## * and its documentation for any purpose is hereby granted without fee,
## * provided that the above copyright notice appear in all copies and
## * that both that copyright notice and this permission notice appear
## * in supporting documentation.
## * It is provided "as is" without express or implied warranty.
## *
##
## port to python by Li Jun
## http://groups.google.com/group/pythoncia
import os, sys
from ctypes import *
up=2
def setlibpath(up):
    """Append the directory *up* levels above this file to sys.path.

    Fixed: the original built the path with the literal '\\..', which is
    only a path separator on Windows; os.path.join with '..' components
    is portable.
    """
    import sys
    base = os.path.split(os.path.realpath(__file__))[0]
    path = os.path.normpath(os.path.join(base, *(['..'] * up)))
    if path not in sys.path:
        sys.path.append(path)
setlibpath(up)
from haru import *
from haru.c_func import *
from haru.hpdf_errorcode import *
# Notice rendered on the page, and the credentials protecting the document.
text = "User cannot print and copy this document."
owner_passwd = "owner"
# Empty user password: the document opens without a prompt, but the
# permission flags set in main() still apply.
user_passwd = ""
@HPDF_Error_Handler(None, HPDF_UINT, HPDF_UINT, c_void_p)
def error_handler (error_no, detail_no, user_data):
    """libharu error callback: report the error, free the document, exit.

    error_no/detail_no are HPDF_UINT codes; user_data is unused.
    """
    global pdf
    # Map the numeric code to its symbolic name for the message.
    printf ("ERROR: %s, detail_no=%u\n", error_detail[error_no],
        detail_no)
    HPDF_Free (pdf)  # release the document before aborting
    sys.exit(1)
def main ():
    """Create permission.pdf: a one-page document protected with an owner
    password, read-only permission, and 128-bit R3 encryption.
    Returns 0 on success, 1 on error."""
    global pdf
    # Output file: this script's path with the extension swapped to .pdf.
    fname=os.path.realpath(sys.argv[0])
    fname=fname[:fname.rfind('.')]+'.pdf'
    pdf = HPDF_New (error_handler, NULL)
    if (not pdf):
        printf ("error: cannot create PdfDoc object\n")
        return 1
    # create default-font
    font = HPDF_GetFont (pdf, "Helvetica", NULL)
    # add a new page object.
    page = HPDF_AddPage (pdf)
    HPDF_Page_SetSize (page, HPDF_PAGE_SIZE_B5, HPDF_PAGE_LANDSCAPE)
    # Centre the notice text on the page.
    HPDF_Page_BeginText (page)
    HPDF_Page_SetFontAndSize (page, font, 20)
    tw = HPDF_Page_TextWidth (page, text)
    HPDF_Page_MoveTextPos (page, (HPDF_Page_GetWidth (page) - tw) / 2,
        (HPDF_Page_GetHeight (page) - 20) / 2)
    HPDF_Page_ShowText (page, text)
    HPDF_Page_EndText (page)
    # Lock the document: passwords, read-only permission, R3/128-bit.
    HPDF_SetPassword (pdf, owner_passwd, user_passwd)
    HPDF_SetPermission (pdf, HPDF_ENABLE_READ)
    HPDF_SetEncryptionMode (pdf, HPDF_ENCRYPT_R3, 16)
    # save the document to a file
    HPDF_SaveToFile (pdf, fname)
    # clean up
    HPDF_Free (pdf)
    return 0
# Run the demo (removed a stray '| Python |' table artifact fused onto
# this line by the extraction).
main()
###
## * << Haru Free PDF Library 2.0.0 >> -- grid_sheet.c
## *
## * Copyright (c) 1999-2006 Takeshi Kanno <takeshi_kanno@est.hi-ho.ne.jp>
## *
## * Permission to use, copy, modify, distribute and sell this software
## * and its documentation for any purpose is hereby granted without fee,
## * provided that the above copyright notice appear in all copies and
## * that both that copyright notice and this permission notice appear
## * in supporting documentation.
## * It is provided "as is" without express or implied warranty.
## *
##
## port to python by Li Jun
## http://groups.google.com/group/pythoncia
import os, sys
from ctypes import *
up=2
def setlibpath(up):
    """Append the directory *up* levels above this file to sys.path.

    Fixed: the original built the path with the literal '\\..', which is
    only a path separator on Windows; os.path.join with '..' components
    is portable.
    """
    import sys
    base = os.path.split(os.path.realpath(__file__))[0]
    path = os.path.normpath(os.path.join(base, *(['..'] * up)))
    if path not in sys.path:
        sys.path.append(path)
setlibpath(up)
from haru import *
from haru.c_func import *
from haru.hpdf_errorcode import *
@HPDF_Error_Handler(None, HPDF_UINT, HPDF_UINT, c_void_p)
def error_handler (error_no, detail_no, user_data):
    """libharu error callback: report the error, free the document, exit.

    error_no/detail_no are HPDF_UINT codes; user_data is unused.
    """
    global pdf
    # Map the numeric code to its symbolic name for the message.
    printf ("ERROR: %s, detail_no=%u\n", error_detail[error_no],
        detail_no)
    HPDF_Free (pdf)  # release the document before aborting
    sys.exit(1)
def print_grid (pdf, page):
    """Draw a measuring grid over *page*: a line every 5 units, heavier
    lines every 10 units, tick marks and coordinate labels along the
    edges. Resets fill/stroke gray to 0 before returning."""
    height = HPDF_Page_GetHeight (page)
    width = HPDF_Page_GetWidth (page)
    font = HPDF_GetFont (pdf, "Helvetica", NULL)
    HPDF_Page_SetFontAndSize (page, font, 5)
    HPDF_Page_SetGrayFill (page, 0.5)
    HPDF_Page_SetGrayStroke (page, 0.8)
    # Draw horizontal lines
    y = 0
    while (y < height):
        # Heavier line every 10 units, thin line otherwise.
        if (y % 10 == 0):
            HPDF_Page_SetLineWidth (page, 0.5)
        else:
            if (HPDF_Page_GetLineWidth (page) != 0.25):
                HPDF_Page_SetLineWidth (page, 0.25)
        HPDF_Page_MoveTo (page, 0, y)
        HPDF_Page_LineTo (page, width, y)
        HPDF_Page_Stroke (page)
        if (y % 10 == 0 and y > 0):
            # Darker tick mark at the left edge of every 10-unit line.
            HPDF_Page_SetGrayStroke (page, 0.5)
            HPDF_Page_MoveTo (page, 0, y)
            HPDF_Page_LineTo (page, 5, y)
            HPDF_Page_Stroke (page)
            HPDF_Page_SetGrayStroke (page, 0.8)
        y += 5
    # Draw vertical lines
    x = 0
    while (x < width):
        if (x % 10 == 0):
            HPDF_Page_SetLineWidth (page, 0.5)
        else:
            if (HPDF_Page_GetLineWidth (page) != 0.25):
                HPDF_Page_SetLineWidth (page, 0.25)
        HPDF_Page_MoveTo (page, x, 0)
        HPDF_Page_LineTo (page, x, height)
        HPDF_Page_Stroke (page)
        if (x % 50 == 0 and x > 0):
            # Darker tick marks at the bottom and top edges every 50 units.
            HPDF_Page_SetGrayStroke (page, 0.5)
            HPDF_Page_MoveTo (page, x, 0)
            HPDF_Page_LineTo (page, x, 5)
            HPDF_Page_Stroke (page)
            HPDF_Page_MoveTo (page, x, height)
            HPDF_Page_LineTo (page, x, height - 5)
            HPDF_Page_Stroke (page)
            HPDF_Page_SetGrayStroke (page, 0.8)
        x += 5
    # Draw horizontal text (y-coordinate labels along the left edge)
    y = 0
    while (y < height):
        if (y % 10 == 0 and y > 0):
            HPDF_Page_BeginText (page)
            HPDF_Page_MoveTextPos (page, 5, y - 2)
            buf="%u" % y
            HPDF_Page_ShowText (page, buf)
            HPDF_Page_EndText (page)
        y += 5
    # Draw vertical text (x-coordinate labels along bottom and top edges)
    x = 0
    while (x < width):
        if (x % 50 == 0 and x > 0):
            HPDF_Page_BeginText (page)
            HPDF_Page_MoveTextPos (page, x, 5)
            buf="%u" % x
            HPDF_Page_ShowText (page, buf)
            HPDF_Page_EndText (page)
            # Same label again along the top edge (buf set just above).
            HPDF_Page_BeginText (page)
            HPDF_Page_MoveTextPos (page, x, height - 10)
            HPDF_Page_ShowText (page, buf)
            HPDF_Page_EndText (page)
        x += 5
    # Restore solid black for subsequent drawing.
    HPDF_Page_SetGrayFill (page, 0)
    HPDF_Page_SetGrayStroke (page, 0)
def main():
    """Create grid_sheet.pdf: a single 400x600 page covered by the
    measuring grid drawn by print_grid(). Returns 0 on success."""
    global pdf
    # Output file: this script's path with the extension swapped to .pdf.
    fname=os.path.realpath(sys.argv[0])
    fname=fname[:fname.rfind('.')]+'.pdf'
    pdf = HPDF_New (error_handler, NULL)
    if (not pdf):
        printf ("error: cannot create PdfDoc object\n")
        return 1
    # add a new page object.
    page = HPDF_AddPage (pdf)
    HPDF_Page_SetHeight (page, 600)
    HPDF_Page_SetWidth (page, 400)
    print_grid (pdf, page)
    # save the document to a file
    HPDF_SaveToFile (pdf, fname)
    # clean up
    HPDF_Free (pdf)
    return 0
# Run only when executed directly; the grid is reusable via print_grid.
# (Removed a stray '| Python |' table artifact fused onto the last line.)
if __name__=='__main__':
    main()
# Public API when this module is imported by the other demos.
__all__=['print_grid']
###
## * << Haru Free PDF Library 2.0.0 >> -- ttfont_demo_jp.c
## *
## * Copyright (c) 1999-2006 Takeshi Kanno <takeshi_kanno@est.hi-ho.ne.jp>
## *
## * Permission to use, copy, modify, distribute and sell this software
## * and its documentation for any purpose is hereby granted without fee,
## * provided that the above copyright notice appear in all copies and
## * that both that copyright notice and this permission notice appear
## * in supporting documentation.
## * It is provided "as is" without express or implied warranty.
## *
##
## port to python by Li Jun
## http://groups.google.com/group/pythoncia
import os, sys
from ctypes import *
up=2
def setlibpath(up):
    """Append the directory *up* levels above this file to sys.path.

    Fixed: the original built the path with the literal '\\..', which is
    only a path separator on Windows; os.path.join with '..' components
    is portable.
    """
    import sys
    base = os.path.split(os.path.realpath(__file__))[0]
    path = os.path.normpath(os.path.join(base, *(['..'] * up)))
    if path not in sys.path:
        sys.path.append(path)
setlibpath(up)
from haru import *
from haru.c_func import *
from haru.hpdf_errorcode import *
@HPDF_Error_Handler(None, HPDF_UINT, HPDF_UINT, c_void_p)
def error_handler (error_no, detail_no, user_data):
    """libharu error callback: report the error, free the document, exit.

    error_no/detail_no are HPDF_UINT codes; user_data is unused.
    """
    global pdf
    # Map the numeric code to its symbolic name for the message.
    printf ("ERROR: %s, detail_no=%u\n", error_detail[error_no],
        detail_no)
    HPDF_Free (pdf)  # release the document before aborting
    sys.exit(1)
def main ():
    """Render a Shift-JIS text sample in a user-supplied TrueType font.

    Usage:
        ttfont_jp_demo <ttf-font-filename> [-E]
        ttfont_jp_demo <ttc-font-filename> <index> [-E]

    -E embeds the font data in the PDF. Returns 0 on success, 1 on a
    usage or I/O error.
    """
    global pdf
    if (len(sys.argv) < 2):
        printf ("ttfont_jp_demo <ttf-font-filename> [-E]\n")
        printf ("ttfont_jp_demo <ttc-font-filename> <index> [-E]\n")
        return 1
    # Read the Shift-JIS sample text. Fixed: was a bare 'except:' which
    # also swallowed SystemExit/KeyboardInterrupt; catch I/O errors only,
    # and use a context manager so the handle is always closed.
    try:
        with open ("mbtext/sjis.txt", "rb") as f:
            SAMP_TXT = f.read(2048)
    except IOError:
        printf ("error: cannot open 'mbtext/sjis.txt'\n")
        return 1
    # Output file: this script's path with the extension swapped to .pdf.
    fname=os.path.realpath(sys.argv[0])
    fname=fname[:fname.rfind('.')]+'.pdf'
    pdf = HPDF_New (error_handler, NULL)
    if (not pdf):
        printf ("error: cannot create PdfDoc object\n")
        return 1
    # declaration for using Japanese encoding.
    HPDF_UseJPEncodings (pdf)
    HPDF_SetCompressionMode (pdf, HPDF_COMP_ALL)
    # Load the font: .ttc files take an index argument; -E requests
    # embedding of the font data.
    if len(sys.argv) == 4 and sys.argv[3]=="-E":
        detail_font_name = HPDF_LoadTTFontFromFile2 (pdf, sys.argv[1],
            int (sys.argv[2]), HPDF_TRUE)
    elif len(sys.argv) == 3 and sys.argv[2]=="-E":
        detail_font_name = HPDF_LoadTTFontFromFile (pdf, sys.argv[1], HPDF_TRUE)
    elif len(sys.argv) == 3:
        detail_font_name = HPDF_LoadTTFontFromFile2 (pdf, sys.argv[1],
            int (sys.argv[2]), HPDF_FALSE)
    else:
        detail_font_name = HPDF_LoadTTFontFromFile (pdf, sys.argv[1], HPDF_FALSE)
    # add a new page object.
    page = HPDF_AddPage (pdf)
    title_font = HPDF_GetFont (pdf, "Helvetica", NULL)
    detail_font = HPDF_GetFont (pdf, detail_font_name, "90msp-RKSJ-H")
    HPDF_Page_SetFontAndSize (page, title_font, 10)
    HPDF_Page_BeginText (page)
    # move the position of the text to top of the page.
    HPDF_Page_MoveTextPos(page, 10, 190)
    HPDF_Page_ShowText (page, detail_font_name)
    HPDF_Page_ShowText (page, " (")
    HPDF_Page_ShowText (page, HPDF_Font_GetEncodingName (detail_font))
    HPDF_Page_ShowText (page, ")")
    HPDF_Page_SetFontAndSize (page, detail_font, 15)
    HPDF_Page_MoveTextPos (page, 10, -20)
    HPDF_Page_ShowText (page, "abcdefghijklmnopqrstuvwxyz")
    HPDF_Page_MoveTextPos (page, 0, -20)
    HPDF_Page_ShowText (page, "ABCDEFGHIJKLMNOPQRSTUVWXYZ")
    HPDF_Page_MoveTextPos (page, 0, -20)
    HPDF_Page_ShowText (page, "1234567890")
    HPDF_Page_MoveTextPos (page, 0, -20)
    # Repeat the sample text at increasing point sizes.
    HPDF_Page_SetFontAndSize (page, detail_font, 10)
    HPDF_Page_ShowText (page, SAMP_TXT)
    HPDF_Page_MoveTextPos (page, 0, -18)
    HPDF_Page_SetFontAndSize (page, detail_font, 16)
    HPDF_Page_ShowText (page, SAMP_TXT)
    HPDF_Page_MoveTextPos (page, 0, -27)
    HPDF_Page_SetFontAndSize (page, detail_font, 23)
    HPDF_Page_ShowText (page, SAMP_TXT)
    HPDF_Page_MoveTextPos (page, 0, -36)
    HPDF_Page_SetFontAndSize (page, detail_font, 30)
    HPDF_Page_ShowText (page, SAMP_TXT)
    HPDF_Page_MoveTextPos (page, 0, -36)
    # Size the page to fit the widest sample line.
    pw = HPDF_Page_TextWidth (page, SAMP_TXT)
    page_height = 210
    page_width = pw + 40
    HPDF_Page_SetWidth (page, page_width)
    HPDF_Page_SetHeight (page, page_height)
    # finish to print text.
    HPDF_Page_EndText (page)
    # Rule lines above and below the body text.
    HPDF_Page_SetLineWidth (page, 0.5)
    HPDF_Page_MoveTo (page, 10, page_height - 25)
    HPDF_Page_LineTo (page, page_width - 10, page_height - 25)
    HPDF_Page_Stroke (page)
    HPDF_Page_MoveTo (page, 10, page_height - 85)
    HPDF_Page_LineTo (page, page_width - 10, page_height - 85)
    HPDF_Page_Stroke (page)
    HPDF_SaveToFile (pdf, fname)
    # clean up
    HPDF_Free (pdf)
    return 0
# Run the demo (removed a stray '| Python |' table artifact fused onto
# this line by the extraction).
main()
#coding=ISO8859-2
###
## * << Haru Free PDF Library 2.0.0 >> -- outline_demo.c
## *
## * Copyright (c) 1999-2006 Takeshi Kanno <takeshi_kanno@est.hi-ho.ne.jp>
## *
## * Permission to use, copy, modify, distribute and sell this software
## * and its documentation for any purpose is hereby granted without fee,
## * provided that the above copyright notice appear in all copies and
## * that both that copyright notice and this permission notice appear
## * in supporting documentation.
## * It is provided "as is" without express or implied warranty.
## *
##
## port to python by Li Jun
## http://groups.google.com/group/pythoncia
import os, sys
from ctypes import *
up=2
def setlibpath(up):
    """Append the directory *up* levels above this file to sys.path.

    Fixed: the original built the path with the literal '\\..', which is
    only a path separator on Windows; os.path.join with '..' components
    is portable.
    """
    import sys
    base = os.path.split(os.path.realpath(__file__))[0]
    path = os.path.normpath(os.path.join(base, *(['..'] * up)))
    if path not in sys.path:
        sys.path.append(path)
setlibpath(up)
from haru import *
from haru.c_func import *
from haru.hpdf_errorcode import *
@HPDF_Error_Handler(None, HPDF_UINT, HPDF_UINT, c_void_p)
def error_handler (error_no, detail_no, user_data):
    """libharu error callback: report the error, free the document, exit.

    error_no/detail_no are HPDF_UINT codes; user_data is unused.
    """
    global pdf
    # Map the numeric code to its symbolic name for the message.
    printf ("ERROR: %s, detail_no=%u\n", error_detail[error_no],
        detail_no)
    HPDF_Free (pdf)  # release the document before aborting
    sys.exit(1)
def print_page (page, page_num):
    """Size *page* to 800x800 and stamp a "Page:N" label near the top."""
    HPDF_Page_SetWidth (page, 800)
    HPDF_Page_SetHeight (page, 800)
    HPDF_Page_BeginText (page)
    HPDF_Page_MoveTextPos (page, 30, 740)
    HPDF_Page_ShowText (page, "Page:%d" % page_num)
    HPDF_Page_EndText (page)
def main():
    """Create outline_demo.pdf: three pages plus a bookmark (outline)
    tree whose entries jump to the top of each page.
    Returns 0 on success, 1 on error."""
    global pdf
    page=[None for i in range(4)]
    outline=[None for i in range(4)]
    # Output file: this script's path with the extension swapped to .pdf.
    fname=os.path.realpath(sys.argv[0])
    fname=fname[:fname.rfind('.')]+'.pdf'
    pdf = HPDF_New (error_handler, NULL)
    if (not pdf):
        printf ("error: cannot create PdfDoc object\n")
        return 1
    # create default-font
    font = HPDF_GetFont (pdf, "Helvetica", NULL)
    # Set page mode to use outlines.
    HPDF_SetPageMode(pdf, HPDF_PAGE_MODE_USE_OUTLINE)
    # Add 3 pages to the document.
    page[0] = HPDF_AddPage (pdf)
    HPDF_Page_SetFontAndSize (page[0], font, 30)
    print_page(page[0], 1)
    page[1] = HPDF_AddPage (pdf)
    HPDF_Page_SetFontAndSize (page[1], font, 30)
    print_page(page[1], 2)
    page[2] = HPDF_AddPage (pdf)
    HPDF_Page_SetFontAndSize (page[2], font, 30)
    print_page(page[2], 3)
    # create outline root.
    root = HPDF_CreateOutline (pdf, NULL, "OutlineRoot", NULL)
    HPDF_Outline_SetOpened (root, HPDF_TRUE)
    outline[0] = HPDF_CreateOutline (pdf, root, "page1", NULL)
    outline[1] = HPDF_CreateOutline (pdf, root, "page2", NULL)
    # BUG FIX: the port commented out the third outline entry (its
    # ISO8859-2 title did not survive the port) but still passed
    # outline[2] — i.e. None — to HPDF_Outline_SetDestination below.
    # Create the entry with a plain ASCII title instead.
    outline[2] = HPDF_CreateOutline (pdf, root, "page3", NULL)
    # create destination objects on each page
    # and link them to the outline items.
    dst = HPDF_Page_CreateDestination (page[0])
    HPDF_Destination_SetXYZ(dst, 0, HPDF_Page_GetHeight(page[0]), 1)
    HPDF_Outline_SetDestination(outline[0], dst)
    dst = HPDF_Page_CreateDestination (page[1])
    HPDF_Destination_SetXYZ(dst, 0, HPDF_Page_GetHeight(page[1]), 1)
    HPDF_Outline_SetDestination(outline[1], dst)
    dst = HPDF_Page_CreateDestination (page[2])
    HPDF_Destination_SetXYZ(dst, 0, HPDF_Page_GetHeight(page[2]), 1)
    HPDF_Outline_SetDestination(outline[2], dst)
    # save the document to a file
    HPDF_SaveToFile (pdf, fname)
    # clean up
    HPDF_Free (pdf)
    return 0
# Run the demo (removed a stray '| Python |' table artifact fused onto
# this line by the extraction).
main()
###
## * << Haru Free PDF Library 2.0.0 >> -- jpfont_demo.c
## *
## * Copyright (c) 1999-2006 Takeshi Kanno <takeshi_kanno@est.hi-ho.ne.jp>
## *
## * Permission to use, copy, modify, distribute and sell this software
## * and its documentation for any purpose is hereby granted without fee,
## * provided that the above copyright notice appear in all copies and
## * that both that copyright notice and this permission notice appear
## * in supporting documentation.
## * It is provided "as is" without express or implied warranty.
## *
##
## port to python by Li Jun
## http://groups.google.com/group/pythoncia
import os, sys
from ctypes import *
up=2
def setlibpath(up):
    """Append the directory *up* levels above this file to sys.path.

    Fixed: the original built the path with the literal '\\..', which is
    only a path separator on Windows; os.path.join with '..' components
    is portable.
    """
    import sys
    base = os.path.split(os.path.realpath(__file__))[0]
    path = os.path.normpath(os.path.join(base, *(['..'] * up)))
    if path not in sys.path:
        sys.path.append(path)
setlibpath(up)
from haru import *
from haru.c_func import *
from haru.hpdf_errorcode import *
@HPDF_Error_Handler(None, HPDF_UINT, HPDF_UINT, c_void_p)
def error_handler (error_no, detail_no, user_data):
    """libharu error callback: report the error, free the document, exit.

    error_no/detail_no are HPDF_UINT codes; user_data is unused.
    """
    global pdf
    # Map the numeric code to its symbolic name for the message.
    printf ("ERROR: %s, detail_no=%u\n", error_detail[error_no],
        detail_no)
    HPDF_Free (pdf)  # release the document before aborting
    sys.exit(1)
def main():
    """Render a GB-encoded (cp936) text sample in each builtin
    Simplified-Chinese font (SimSun/SimHei variants), one page per font,
    each reachable via a PDF outline entry. Returns 0 on success.

    (Adapted from the libharu jpfont demo; fonts and encoding are CNS,
    i.e. Chinese Simplified, despite the file's name.)
    """
    global pdf
    # Builtin Simplified-Chinese fonts to demonstrate.
    chinesefonts = '''
        SimSun
        SimSun,Bold
        SimSun,Italic
        SimSun,BoldItalic
        SimHei
        SimHei,Bold
        SimHei,Italic
        SimHei,BoldItalic
    '''
    # strip() makes the list robust to the literal's indentation.
    chinesefonts = [name.strip() for name in chinesefonts.split('\n')
                    if name.strip()]
    detail_font = []
    PAGE_HEIGHT = 210
    # Read the GB (cp936) encoded sample text. Fixed: was a bare
    # 'except:' and the error message reported the wrong file
    # ('mbtext/sjis.txt'); also use a context manager.
    try:
        with open ("mbtext/cp936.txt", "rb") as f:
            samp_text = f.read(2048)
    except IOError:
        printf ("error: cannot open 'mbtext/cp936.txt'\n")
        return 1
    # Output file: this script's path with the extension swapped to .pdf.
    fname=os.path.realpath(sys.argv[0])
    fname=fname[:fname.rfind('.')]+'.pdf'
    pdf = HPDF_New (error_handler, NULL)
    if (not pdf):
        printf ("error: cannot create PdfDoc object\n")
        return 1
    # configure pdf-document to be compressed.
    HPDF_SetCompressionMode (pdf, HPDF_COMP_ALL)
    # declaration for using Chinese-Simplified fonts and encodings.
    HPDF_UseCNSEncodings (pdf)
    HPDF_UseCNSFonts (pdf)
    for fontname in chinesefonts:
        detail_font.append( HPDF_GetFont (pdf, fontname, "GB-EUC-H"))
    # Set page mode to use outlines.
    HPDF_SetPageMode(pdf, HPDF_PAGE_MODE_USE_OUTLINE)
    # create outline root (title kept from the JP demo this was ported from).
    root = HPDF_CreateOutline (pdf, NULL, "JP font demo", NULL)
    HPDF_Outline_SetOpened (root, HPDF_TRUE)
    for i in detail_font:
        # add a new page object.
        page = HPDF_AddPage (pdf)
        # create outline entry
        outline = HPDF_CreateOutline (pdf, root,
            HPDF_Font_GetFontName (i), NULL)
        dst = HPDF_Page_CreateDestination (page)
        HPDF_Outline_SetDestination(outline, dst)
        title_font = HPDF_GetFont (pdf, "Helvetica", NULL)
        HPDF_Page_SetFontAndSize (page, title_font, 10)
        HPDF_Page_BeginText (page)
        # move the position of the text to top of the page.
        HPDF_Page_MoveTextPos(page, 10, 190)
        HPDF_Page_ShowText (page, HPDF_Font_GetFontName (i))
        HPDF_Page_SetFontAndSize (page, i, 15)
        HPDF_Page_MoveTextPos (page, 10, -20)
        HPDF_Page_ShowText (page, "abcdefghijklmnopqrstuvwxyz")
        HPDF_Page_MoveTextPos (page, 0, -20)
        HPDF_Page_ShowText (page, "ABCDEFGHIJKLMNOPQRSTUVWXYZ")
        HPDF_Page_MoveTextPos (page, 0, -20)
        HPDF_Page_ShowText (page, "1234567890")
        HPDF_Page_MoveTextPos (page, 0, -20)
        # Repeat the sample text at increasing point sizes.
        HPDF_Page_SetFontAndSize (page, i, 10)
        HPDF_Page_ShowText (page, samp_text)
        HPDF_Page_MoveTextPos (page, 0, -18)
        HPDF_Page_SetFontAndSize (page, i, 16)
        HPDF_Page_ShowText (page, samp_text)
        HPDF_Page_MoveTextPos (page, 0, -27)
        HPDF_Page_SetFontAndSize (page, i, 23)
        HPDF_Page_ShowText (page, samp_text)
        HPDF_Page_MoveTextPos (page, 0, -36)
        HPDF_Page_SetFontAndSize (page, i, 30)
        HPDF_Page_ShowText (page, samp_text)
        p = HPDF_Page_GetCurrentTextPos (page)
        # finish to print text.
        HPDF_Page_EndText (page)
        # Tick marks under the largest sample line.
        HPDF_Page_SetLineWidth (page, 0.5)
        x_pos = 20
        for j in range(len (samp_text) // 2):
            HPDF_Page_MoveTo (page, x_pos, p.y - 10)
            HPDF_Page_LineTo (page, x_pos, p.y - 12)
            HPDF_Page_Stroke (page)
            x_pos = x_pos + 30
        # Size the page to the printed text, then draw rule lines.
        HPDF_Page_SetWidth (page, p.x + 20)
        HPDF_Page_SetHeight (page, PAGE_HEIGHT)
        HPDF_Page_MoveTo (page, 10, PAGE_HEIGHT - 25)
        HPDF_Page_LineTo (page, p.x + 10, PAGE_HEIGHT - 25)
        HPDF_Page_Stroke (page)
        HPDF_Page_MoveTo (page, 10, PAGE_HEIGHT - 85)
        HPDF_Page_LineTo (page, p.x + 10, PAGE_HEIGHT - 85)
        HPDF_Page_Stroke (page)
        HPDF_Page_MoveTo (page, 10, p.y - 12)
        HPDF_Page_LineTo (page, p.x + 10, p.y - 12)
        HPDF_Page_Stroke (page)
    HPDF_SaveToFile (pdf, fname)
    # clean up
    HPDF_Free (pdf)
    return 0
# Run the demo (removed a stray '| Python |' table artifact fused onto
# this line by the extraction).
main()
###
## * << Haru Free PDF Library 2.0.0 >> -- encryption.c
## *
## * Copyright (c) 1999-2006 Takeshi Kanno <takeshi_kanno@est.hi-ho.ne.jp>
## *
## * Permission to use, copy, modify, distribute and sell this software
## * and its documentation for any purpose is hereby granted without fee,
## * provided that the above copyright notice appear in all copies and
## * that both that copyright notice and this permission notice appear
## * in supporting documentation.
## * It is provided "as is" without express or implied warranty.
## *
##
## port to python by Li Jun
## http://groups.google.com/group/pythoncia
import os, sys
from ctypes import *
up=2
def setlibpath(up):
    """Append the directory *up* levels above this file to sys.path.

    Fixed: the original built the path with the literal '\\..', which is
    only a path separator on Windows; os.path.join with '..' components
    is portable.
    """
    import sys
    base = os.path.split(os.path.realpath(__file__))[0]
    path = os.path.normpath(os.path.join(base, *(['..'] * up)))
    if path not in sys.path:
        sys.path.append(path)
setlibpath(up)
from haru import *
from haru.c_func import *
from haru.hpdf_errorcode import *
# Notice rendered on the page, and the passwords protecting the document.
text = "This is an encrypt document example."
owner_passwd = "owner"
user_passwd = "user"
@HPDF_Error_Handler(None, HPDF_UINT, HPDF_UINT, c_void_p)
def error_handler (error_no, detail_no, user_data):
    """libharu error callback: report the error, free the document, exit.

    error_no/detail_no are HPDF_UINT codes; user_data is unused.
    """
    global pdf
    # Map the numeric code to its symbolic name for the message.
    printf ("ERROR: %s, detail_no=%u\n", error_detail[error_no],
        detail_no)
    HPDF_Free (pdf)  # release the document before aborting
    sys.exit(1)
def main ():
    """Create encryption.pdf: a one-page document protected with owner
    and user passwords (default encryption mode).
    Returns 0 on success, 1 on error."""
    global pdf
    # Output file: this script's path with the extension swapped to .pdf.
    fname=os.path.realpath(sys.argv[0])
    fname=fname[:fname.rfind('.')]+'.pdf'
    pdf = HPDF_New (error_handler, NULL)
    if (not pdf):
        printf ("error: cannot create PdfDoc object\n")
        return 1
    # create default-font
    font = HPDF_GetFont (pdf, "Helvetica", NULL)
    # add a new page object.
    page = HPDF_AddPage (pdf)
    HPDF_Page_SetSize (page, HPDF_PAGE_SIZE_B5, HPDF_PAGE_LANDSCAPE)
    # Centre the notice text on the page.
    HPDF_Page_BeginText (page)
    HPDF_Page_SetFontAndSize (page, font, 20)
    tw = HPDF_Page_TextWidth (page, text)
    HPDF_Page_MoveTextPos (page, (HPDF_Page_GetWidth (page) - tw) / 2,
        (HPDF_Page_GetHeight (page) - 20) / 2)
    HPDF_Page_ShowText (page, text)
    HPDF_Page_EndText (page)
    # Setting a password enables encryption of the document.
    HPDF_SetPassword (pdf, owner_passwd, user_passwd)
    # save the document to a file
    HPDF_SaveToFile (pdf, fname)
    # clean up
    HPDF_Free (pdf)
    return 0
# Run the demo (removed a stray '| Python |' table artifact fused onto
# this line by the extraction).
main()
###
## * << Haru Free PDF Library 2.0.0 >> -- ext_gstate_demo.c
## *
## * Copyright (c) 1999-2006 Takeshi Kanno <takeshi_kanno@est.hi-ho.ne.jp>
## *
## * Permission to use, copy, modify, distribute and sell this software
## * and its documentation for any purpose is hereby granted without fee,
## * provided that the above copyright notice appear in all copies and
## * that both that copyright notice and this permission notice appear
## * in supporting documentation.
## * It is provided "as is" without express or implied warranty.
## *
##
## port to python by Li Jun
## http://groups.google.com/group/pythoncia
import os, sys
from ctypes import *
up=2
def setlibpath(up):
    """Append the directory *up* levels above this file to sys.path.

    Fixed: the original built the path with the literal '\\..', which is
    only a path separator on Windows; os.path.join with '..' components
    is portable.
    """
    import sys
    base = os.path.split(os.path.realpath(__file__))[0]
    path = os.path.normpath(os.path.join(base, *(['..'] * up)))
    if path not in sys.path:
        sys.path.append(path)
setlibpath(up)
from haru import *
from haru.c_func import *
from haru.hpdf_errorcode import *
@HPDF_Error_Handler(None, HPDF_UINT, HPDF_UINT, c_void_p)
def error_handler (error_no, detail_no, user_data):
    """libharu error callback: report the error, free the document, exit.

    error_no/detail_no are HPDF_UINT codes; user_data is unused.
    """
    global pdf
    # Map the numeric code to its symbolic name for the message.
    printf ("ERROR: %s, detail_no=%u\n", error_detail[error_no],
        detail_no)
    HPDF_Free (pdf)  # release the document before aborting
    sys.exit(1)
def draw_circles (page, description, x, y):
    """Draw three overlapping filled circles (red, green, blue) anchored
    at (x, y) on *page*, and write *description* above them."""
    HPDF_Page_SetLineWidth (page, 1.0)
    HPDF_Page_SetRGBStroke (page, 0.0, 0.0, 0.0)
    # One circle per primary colour, positioned so all three overlap.
    circles = (
        ((1.0, 0.0, 0.0), x + 40, y + 40),
        ((0.0, 1.0, 0.0), x + 100, y + 40),
        ((0.0, 0.0, 1.0), x + 70, y + 74.64),
    )
    for (r, g, b), cx, cy in circles:
        HPDF_Page_SetRGBFill (page, r, g, b)
        HPDF_Page_Circle (page, cx, cy, 40)
        HPDF_Page_ClosePathFillStroke (page)
    # Label in black above the circles.
    HPDF_Page_SetRGBFill (page, 0.0, 0.0, 0.0)
    HPDF_Page_BeginText (page)
    HPDF_Page_TextOut (page, x + 0.0, y + 130.0, description)
    HPDF_Page_EndText (page)
def main ():
    """Create ext_gstate_demo.pdf: one labelled circle-triplet per
    extended-graphics-state setting (normal, two alpha levels, and each
    separable blend mode). Returns 0 on success, 1 on error.

    Refactored: the eleven copy-pasted blend-mode stanzas are now driven
    from a table; behaviour and output are unchanged.
    """
    global pdf
    PAGE_WIDTH = 600
    PAGE_HEIGHT = 900
    # Output file: this script's path with the extension swapped to .pdf.
    fname=os.path.realpath(sys.argv[0])
    fname=fname[:fname.rfind('.')]+'.pdf'
    pdf = HPDF_New (error_handler, NULL)
    if (not pdf):
        printf ("error: cannot create PdfDoc object\n")
        return 1
    hfont = HPDF_GetFont (pdf, "Helvetica-Bold", NULL)
    # add a new page object.
    page = HPDF_AddPage (pdf)
    HPDF_Page_SetFontAndSize (page, hfont, 10)
    HPDF_Page_SetHeight (page, PAGE_HEIGHT)
    HPDF_Page_SetWidth (page, PAGE_WIDTH)
    # normal (no ext-gstate applied)
    HPDF_Page_GSave (page)
    draw_circles (page, "normal", 40.0, PAGE_HEIGHT - 170)
    HPDF_Page_GRestore (page)
    # transparency (0.8): both fill and stroke alpha
    HPDF_Page_GSave (page)
    gstate = HPDF_CreateExtGState (pdf)
    HPDF_ExtGState_SetAlphaFill (gstate, 0.8)
    HPDF_ExtGState_SetAlphaStroke (gstate, 0.8)
    HPDF_Page_SetExtGState (page, gstate)
    draw_circles (page, "alpha fill = 0.8", 230.0, PAGE_HEIGHT - 170)
    HPDF_Page_GRestore (page)
    # transparency (0.4): fill alpha only
    HPDF_Page_GSave (page)
    gstate = HPDF_CreateExtGState (pdf)
    HPDF_ExtGState_SetAlphaFill (gstate, 0.4)
    HPDF_Page_SetExtGState (page, gstate)
    draw_circles (page, "alpha fill = 0.4", 420.0, PAGE_HEIGHT - 170)
    HPDF_Page_GRestore (page)
    # Blend-mode demos: (mode, label, x, y), three per row.
    # HPDF_BM_COLOR_BUM / HPDF_BM_EXCLUSHON are libharu's own spellings.
    blend_demos = [
        (HPDF_BM_MULTIPLY,    "HPDF_BM_MULTIPLY",    40.0,  PAGE_HEIGHT - 340),
        (HPDF_BM_SCREEN,      "HPDF_BM_SCREEN",      230.0, PAGE_HEIGHT - 340),
        (HPDF_BM_OVERLAY,     "HPDF_BM_OVERLAY",     420.0, PAGE_HEIGHT - 340),
        (HPDF_BM_DARKEN,      "HPDF_BM_DARKEN",      40.0,  PAGE_HEIGHT - 510),
        (HPDF_BM_LIGHTEN,     "HPDF_BM_LIGHTEN",     230.0, PAGE_HEIGHT - 510),
        (HPDF_BM_COLOR_DODGE, "HPDF_BM_COLOR_DODGE", 420.0, PAGE_HEIGHT - 510),
        (HPDF_BM_COLOR_BUM,   "HPDF_BM_COLOR_BUM",   40.0,  PAGE_HEIGHT - 680),
        (HPDF_BM_HARD_LIGHT,  "HPDF_BM_HARD_LIGHT",  230.0, PAGE_HEIGHT - 680),
        (HPDF_BM_SOFT_LIGHT,  "HPDF_BM_SOFT_LIGHT",  420.0, PAGE_HEIGHT - 680),
        (HPDF_BM_DIFFERENCE,  "HPDF_BM_DIFFERENCE",  40.0,  PAGE_HEIGHT - 850),
        (HPDF_BM_EXCLUSHON,   "HPDF_BM_EXCLUSHON",   230.0, PAGE_HEIGHT - 850),
    ]
    for mode, label, x, y in blend_demos:
        HPDF_Page_GSave (page)
        gstate = HPDF_CreateExtGState (pdf)
        HPDF_ExtGState_SetBlendMode (gstate, mode)
        HPDF_Page_SetExtGState (page, gstate)
        draw_circles (page, label, x, y)
        HPDF_Page_GRestore (page)
    # save the document to a file
    HPDF_SaveToFile (pdf, fname)
    # clean up
    HPDF_Free (pdf)
    return 0
# Run the demo (removed a stray '| Python |' table artifact fused onto
# this line by the extraction).
main()
###
## * << Haru Free PDF Library 2.0.0 >> -- raw_image_demo.c
## *
## * Copyright (c) 1999-2006 Takeshi Kanno <takeshi_kanno@est.hi-ho.ne.jp>
## *
## * Permission to use, copy, modify, distribute and sell this software
## * and its documentation for any purpose is hereby granted without fee,
## * provided that the above copyright notice appear in all copies and
## * that both that copyright notice and this permission notice appear
## * in supporting documentation.
## * It is provided "as is" without express or implied warranty.
## *
##
## port to python by Li Jun
## http://groups.google.com/group/pythoncia
import os, sys
from ctypes import *
up=2
def setlibpath(up):
    """Append the directory *up* levels above this file to sys.path.

    The original built the path with a literal '\..' suffix, which only
    works with Windows path separators; os.pardir keeps it portable.
    """
    import sys
    here = os.path.split(os.path.realpath(__file__))[0]
    path = os.path.normpath(os.path.join(here, *([os.pardir] * up)))
    if path not in sys.path:
        sys.path.append(path)
setlibpath(up)
from haru import *
from haru.c_func import *
from haru.hpdf_errorcode import *
@HPDF_Error_Handler(None, HPDF_UINT, HPDF_UINT, c_void_p)
def error_handler(error_no, detail_no, user_data):
    """libharu error callback: print the error, free the document, exit.

    Wrapped as a ctypes callback by the project's HPDF_Error_Handler
    decorator and passed to HPDF_New in main().
    """
    global pdf
    printf("ERROR: %s, detail_no=%u\n", error_detail[error_no],
           detail_no)
    HPDF_Free(pdf)
    sys.exit(1)
# 32x32-pixel, 1-bit-per-pixel bitmap: 4 bytes per row, 32 rows (128 bytes).
# Fed to HPDF_LoadRawImageFromMem in main() as a 1-bit grayscale image.
RAW_IMAGE_DATA = [
    0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xfc,
    0xff, 0xff, 0xff, 0xf8, 0xff, 0xff, 0xff, 0xf0,
    0xf3, 0xf3, 0xff, 0xe0, 0xf3, 0xf3, 0xff, 0xc0,
    0xf3, 0xf3, 0xff, 0x80, 0xf3, 0x33, 0xff, 0x00,
    0xf3, 0x33, 0xfe, 0x00, 0xf3, 0x33, 0xfc, 0x00,
    0xf8, 0x07, 0xf8, 0x00, 0xf8, 0x07, 0xf0, 0x00,
    0xfc, 0xcf, 0xe0, 0x00, 0xfc, 0xcf, 0xc0, 0x00,
    0xff, 0xff, 0x80, 0x00, 0xff, 0xff, 0x00, 0x00,
    0xff, 0xfe, 0x00, 0x00, 0xff, 0xfc, 0x00, 0x00,
    0xff, 0xf8, 0x0f, 0xe0, 0xff, 0xf0, 0x0f, 0xe0,
    0xff, 0xe0, 0x0c, 0x30, 0xff, 0xc0, 0x0c, 0x30,
    0xff, 0x80, 0x0f, 0xe0, 0xff, 0x00, 0x0f, 0xe0,
    0xfe, 0x00, 0x0c, 0x30, 0xfc, 0x00, 0x0c, 0x30,
    0xf8, 0x00, 0x0f, 0xe0, 0xf0, 0x00, 0x0f, 0xe0,
    0xe0, 0x00, 0x00, 0x00, 0xc0, 0x00, 0x00, 0x00,
    0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
]
def main():
    """Build raw_image_demo.pdf: RGB/gray raw images from file and memory."""
    global pdf
    # Output PDF lives next to this script, with the same basename.
    fname = os.path.realpath(sys.argv[0])
    fname = fname[:fname.rfind('.')] + '.pdf'
    pdf = HPDF_New(error_handler, NULL)
    if (not pdf):
        printf("error: cannot create PdfDoc object\n")
        return 1
    HPDF_SetCompressionMode(pdf, HPDF_COMP_ALL)
    # create default-font
    font = HPDF_GetFont(pdf, "Helvetica", NULL)
    # add a new page object.
    page = HPDF_AddPage(pdf)
    HPDF_Page_SetWidth(page, 172)
    HPDF_Page_SetHeight(page, 80)
    HPDF_Page_BeginText(page)
    HPDF_Page_SetFontAndSize(page, font, 20)
    # NOTE(review): x=220 lies outside the 172pt page width set above, so
    # the title is presumably clipped -- confirm against the C demo.
    HPDF_Page_MoveTextPos(page, 220, HPDF_Page_GetHeight(page) - 70)
    HPDF_Page_ShowText(page, "RawImageDemo")
    HPDF_Page_EndText(page)
    # load RGB raw-image file.
    image = HPDF_LoadRawImageFromFile(pdf, "rawimage/32_32_rgb.dat",
                                      32, 32, HPDF_CS_DEVICE_RGB)
    x = 20
    y = 20
    # Draw image to the canvas. (normal-mode with actual size.)
    HPDF_Page_DrawImage(page, image, x, y, 32, 32)
    # load GrayScale raw-image file.
    image = HPDF_LoadRawImageFromFile(pdf, "rawimage/32_32_gray.dat",
                                      32, 32, HPDF_CS_DEVICE_GRAY)
    x = 70
    y = 20
    # Draw image to the canvas. (normal-mode with actual size.)
    HPDF_Page_DrawImage(page, image, x, y, 32, 32)
    # load GrayScale raw-image (1bit) file from memory.
    image = HPDF_LoadRawImageFromMem(pdf, RAW_IMAGE_DATA, 32, 32,
                                     HPDF_CS_DEVICE_GRAY, 1)
    x = 120
    y = 20
    # Draw image to the canvas. (normal-mode with actual size.)
    HPDF_Page_DrawImage(page, image, x, y, 32, 32)
    # save the document to a file
    HPDF_SaveToFile(pdf, fname)
    # clean up
    HPDF_Free(pdf)
    return 0
main()
###
## * << Haru Free PDF Library 2.0.0 >> -- jpeg_demo.c
## *
## * Copyright (c) 1999-2006 Takeshi Kanno <takeshi_kanno@est.hi-ho.ne.jp>
## *
## * Permission to use, copy, modify, distribute and sell this software
## * and its documentation for any purpose is hereby granted without fee,
## * provided that the above copyright notice appear in all copies and
## * that both that copyright notice and this permission notice appear
## * in supporting documentation.
## * It is provided "as is" without express or implied warranty.
## *
##
## port to python by Li Jun
## http://groups.google.com/group/pythoncia
import os, sys
from ctypes import *
up=2
def setlibpath(up):
    """Append the directory *up* levels above this file to sys.path.

    The original built the path with a literal '\..' suffix, which only
    works with Windows path separators; os.pardir keeps it portable.
    """
    import sys
    here = os.path.split(os.path.realpath(__file__))[0]
    path = os.path.normpath(os.path.join(here, *([os.pardir] * up)))
    if path not in sys.path:
        sys.path.append(path)
setlibpath(up)
from haru import *
from haru.c_func import *
from haru.hpdf_errorcode import *
@HPDF_Error_Handler(None, HPDF_UINT, HPDF_UINT, c_void_p)
def error_handler(error_no, detail_no, user_data):
    """libharu error callback: print the error, free the document, exit.

    Wrapped as a ctypes callback by the project's HPDF_Error_Handler
    decorator and passed to HPDF_New in main().
    """
    global pdf
    printf("ERROR: %s, detail_no=%u\n", error_detail[error_no],
           detail_no)
    HPDF_Free(pdf)
    sys.exit(1)
def draw_image(pdf, filename, x, y, text):
    """Draw images/<filename> at (x, y) in natural size and caption it."""
    page = HPDF_GetCurrentPage(pdf)
    image = HPDF_LoadJpegImageFromFile(pdf, "images/%s" % filename)
    # Paint the image at its intrinsic dimensions.
    img_w = HPDF_Image_GetWidth(image)
    img_h = HPDF_Image_GetHeight(image)
    HPDF_Page_DrawImage(page, image, x, y, img_w, img_h)
    # Caption: file name on the first line, description on the second.
    HPDF_Page_BeginText(page)
    HPDF_Page_SetTextLeading(page, 16)
    HPDF_Page_MoveTextPos(page, x, y)
    HPDF_Page_ShowTextNextLine(page, filename)
    HPDF_Page_ShowTextNextLine(page, text)
    HPDF_Page_EndText(page)
def main():
    """Build jpeg_demo.pdf: a 24-bit and an 8-bit JPEG with captions."""
    global pdf
    fname = os.path.realpath(sys.argv[0])
    fname = fname[:fname.rfind('.')] + '.pdf'
    pdf = HPDF_New(error_handler, NULL)
    if (not pdf):
        printf("error: cannot create PdfDoc object\n")
        return 1
    HPDF_SetCompressionMode(pdf, HPDF_COMP_ALL)
    # create default-font
    font = HPDF_GetFont(pdf, "Helvetica", NULL)
    # add a new page object.
    page = HPDF_AddPage(pdf)
    HPDF_Page_SetWidth(page, 650)
    HPDF_Page_SetHeight(page, 500)
    # Open the document scrolled to the top of this page.
    dst = HPDF_Page_CreateDestination(page)
    HPDF_Destination_SetXYZ(dst, 0, HPDF_Page_GetHeight(page), 1)
    HPDF_SetOpenAction(pdf, dst)
    HPDF_Page_BeginText(page)
    HPDF_Page_SetFontAndSize(page, font, 20)
    HPDF_Page_MoveTextPos(page, 220, HPDF_Page_GetHeight(page) - 70)
    HPDF_Page_ShowText(page, "JpegDemo")
    HPDF_Page_EndText(page)
    HPDF_Page_SetFontAndSize(page, font, 12)
    draw_image(pdf, "rgb.jpg", 70, HPDF_Page_GetHeight(page) - 410,
               "24bit color image")
    draw_image(pdf, "gray.jpg", 340, HPDF_Page_GetHeight(page) - 410,
               "8bit grayscale image")
    # save the document to a file
    HPDF_SaveToFile(pdf, fname)
    # clean up
    HPDF_Free(pdf)
    return 0
main()
###
## * << Haru Free PDF Library 2.0.0 >> -- outline_demo_jp.c
## *
## * Copyright (c) 1999-2006 Takeshi Kanno <takeshi_kanno@est.hi-ho.ne.jp>
## *
## * Permission to use, copy, modify, distribute and sell this software
## * and its documentation for any purpose is hereby granted without fee,
## * provided that the above copyright notice appear in all copies and
## * that both that copyright notice and this permission notice appear
## * in supporting documentation.
## * It is provided "as is" without express or implied warranty.
## *
##
## port to python by Li Jun
## http://groups.google.com/group/pythoncia
import os, sys
from ctypes import *
up=2
def setlibpath(up):
    """Append the directory *up* levels above this file to sys.path.

    The original built the path with a literal '\..' suffix, which only
    works with Windows path separators; os.pardir keeps it portable.
    """
    import sys
    here = os.path.split(os.path.realpath(__file__))[0]
    path = os.path.normpath(os.path.join(here, *([os.pardir] * up)))
    if path not in sys.path:
        sys.path.append(path)
setlibpath(up)
from haru import *
from haru.c_func import *
from haru.hpdf_errorcode import *
# Debug aid (Python 2 print statement): list imported names containing
# 'CreateOutLine' to check which spelling this wrapper actually exports
# (other demos in this collection use 'HPDF_CreateOutline').
for i in dir():
    if 'CreateOutLine' in i:
        print i
@HPDF_Error_Handler(None, HPDF_UINT, HPDF_UINT, c_void_p)
def error_handler(error_no, detail_no, user_data):
    """libharu error callback: print the error, free the document, exit.

    Wrapped as a ctypes callback by the project's HPDF_Error_Handler
    decorator and passed to HPDF_New in main().
    """
    global pdf
    printf("ERROR: %s, detail_no=%u\n", error_detail[error_no],
           detail_no)
    HPDF_Free(pdf)
    sys.exit(1)
def print_page(page, page_num):
    """Size *page* to 200x300 points and stamp 'Page:<n>' on it."""
    HPDF_Page_SetWidth(page, 200)
    HPDF_Page_SetHeight(page, 300)
    label = "Page:%d" % page_num
    HPDF_Page_BeginText(page)
    HPDF_Page_MoveTextPos(page, 50, 250)
    HPDF_Page_ShowText(page, label)
    HPDF_Page_EndText(page)
def main():
    """Build outline_demo_jp.pdf: 3 pages behind a Shift-JIS-titled outline."""
    global pdf
    page = [None for i in range(4)]
    outline = [None for i in range(4)]
    fname = os.path.realpath(sys.argv[0])
    fname = fname[:fname.rfind('.')] + '.pdf'
    try:
        f = open("mbtext/sjis.txt", "rb")
    except:
        printf("error: cannot open 'mbtext/sjis.txt'\n")
        return 1
    # Raw Shift-JIS bytes used as the third outline title.
    SAMP_TXT = f.read(2048)
    f.close()
    pdf = HPDF_New(error_handler, NULL)
    if (not pdf):
        printf("error: cannot create PdfDoc object\n")
        return 1
    # declaration for using Japanese encoding.
    HPDF_UseJPEncodings(pdf)
    # create default-font
    font = HPDF_GetFont(pdf, "Helvetica", NULL)
    # Set page mode to use outlines.
    HPDF_SetPageMode(pdf, HPDF_PAGE_MODE_USE_OUTLINE)
    # Add 3 pages to the document.
    page[0] = HPDF_AddPage(pdf)
    HPDF_Page_SetFontAndSize(page[0], font, 20)
    print_page(page[0], 1)
    page[1] = HPDF_AddPage(pdf)
    HPDF_Page_SetFontAndSize(page[1], font, 20)
    print_page(page[1], 2)
    page[2] = HPDF_AddPage(pdf)
    HPDF_Page_SetFontAndSize(page[2], font, 20)
    print_page(page[2], 3)
    # create outline root.
    root = HPDF_CreateOutLine(pdf, NULL, "OutlineRoot", NULL)
    HPDF_Outline_SetOpened(root, HPDF_TRUE)
    outline[0] = HPDF_CreateOutLine(pdf, root, "page1", NULL)
    outline[1] = HPDF_CreateOutLine(pdf, root, "page2", NULL)
    # create outline whose title is Shift-JIS text, via the 90ms-RKSJ-H encoder.
    outline[2] = HPDF_CreateOutLine(pdf, root, SAMP_TXT,
                                    HPDF_GetEncoder(pdf, "90ms-RKSJ-H"))
    # create destination objects on each pages
    # and link it to outline items.
    dst = HPDF_Page_CreateDestination(page[0])
    HPDF_Destination_SetXYZ(dst, 0, HPDF_Page_GetHeight(page[0]), 1)
    HPDF_Outline_SetDestination(outline[0], dst)
    # HPDF_Catalog_SetOpenAction(dst)
    dst = HPDF_Page_CreateDestination(page[1])
    HPDF_Destination_SetXYZ(dst, 0, HPDF_Page_GetHeight(page[1]), 1)
    HPDF_Outline_SetDestination(outline[1], dst)
    dst = HPDF_Page_CreateDestination(page[2])
    HPDF_Destination_SetXYZ(dst, 0, HPDF_Page_GetHeight(page[2]), 1)
    HPDF_Outline_SetDestination(outline[2], dst)
    # save the document to a file
    HPDF_SaveToFile(pdf, fname)
    # clean up
    HPDF_Free(pdf)
    return 0
main()
###
## * << Haru Free PDF Library 2.0.0 >> -- text_demo.c
## *
## * Copyright (c) 1999-2006 Takeshi Kanno <takeshi_kanno@est.hi-ho.ne.jp>
## *
## * Permission to use, copy, modify, distribute and sell this software
## * and its documentation for any purpose is hereby granted without fee,
## * provided that the above copyright notice appear in all copies and
## * that both that copyright notice and this permission notice appear
## * in supporting documentation.
## * It is provided "as is" without express or implied warranty.
## *
##
## port to python by Li Jun
## http://groups.google.com/group/pythoncia
import os, sys
from ctypes import *
up=2
def setlibpath(up):
    """Append the directory *up* levels above this file to sys.path.

    The original built the path with a literal '\..' suffix, which only
    works with Windows path separators; os.pardir keeps it portable.
    """
    import sys
    here = os.path.split(os.path.realpath(__file__))[0]
    path = os.path.normpath(os.path.join(here, *([os.pardir] * up)))
    if path not in sys.path:
        sys.path.append(path)
setlibpath(up)
from haru import *
from haru.c_func import *
from haru.hpdf_errorcode import *
from grid_sheet import *
from math import *
@HPDF_Error_Handler(None, HPDF_UINT, HPDF_UINT, c_void_p)
def error_handler(error_no, detail_no, user_data):
    """libharu error callback: print the error, free the document, exit.

    Wrapped as a ctypes callback by the project's HPDF_Error_Handler
    decorator and passed to HPDF_New in main().
    """
    global pdf
    printf("ERROR: %s, detail_no=%u\n", error_detail[error_no],
           detail_no)
    HPDF_Free(pdf)
    sys.exit(1)
def show_stripe_pattern(page, x, y):
    """Stroke horizontal stripes, 3pt apart, as wide as 'ABCabc123' at (x, y)."""
    # Stripes every 3 points from y to y+48 inclusive.
    for offset in range(0, 50, 3):
        HPDF_Page_SetRGBStroke(page, 0.0, 0.0, 0.5)
        HPDF_Page_SetLineWidth(page, 1)
        HPDF_Page_MoveTo(page, x, y + offset)
        HPDF_Page_LineTo(page, x + HPDF_Page_TextWidth(page, "ABCabc123"),
                         y + offset)
        HPDF_Page_Stroke(page)
    # Leave the caller with a 2.5pt line width, as the original did.
    HPDF_Page_SetLineWidth(page, 2.5)
def show_description(page, x, y, text):
    """Print *text* in 10pt black fill 12pt below (x, y), then restore state."""
    # Remember the caller's text state so it can be restored afterwards.
    saved_size = HPDF_Page_GetCurrentFontSize(page)
    saved_font = HPDF_Page_GetCurrentFont(page)
    fill = HPDF_Page_GetRGBFill(page)
    HPDF_Page_BeginText(page)
    HPDF_Page_SetRGBFill(page, 0, 0, 0)
    HPDF_Page_SetTextRenderingMode(page, HPDF_FILL)
    HPDF_Page_SetFontAndSize(page, saved_font, 10)
    HPDF_Page_TextOut(page, x, y - 12, text)
    HPDF_Page_EndText(page)
    # Restore the saved font size and fill colour.
    HPDF_Page_SetFontAndSize(page, saved_font, saved_size)
    HPDF_Page_SetRGBFill(page, fill.r, fill.g, fill.b)
def main():
    """Build text_demo.pdf: font sizes, colours, rendering modes, matrices, spacing."""
    global pdf
    page_title = "Text Demo"
    samp_text = "abcdefgABCDEFG123!#$%&+-@?"
    samp_text2 = "The quick brown fox jumps over the lazy dog."
    fname = os.path.realpath(sys.argv[0])
    fname = fname[:fname.rfind('.')] + '.pdf'
    pdf = HPDF_New(error_handler, NULL)
    if (not pdf):
        printf("error: cannot create PdfDoc object\n")
        return 1
    # set compression mode
    HPDF_SetCompressionMode(pdf, HPDF_COMP_ALL)
    # create default-font
    font = HPDF_GetFont(pdf, "Helvetica", NULL)
    # add a new page object.
    page = HPDF_AddPage(pdf)
    # draw grid to the page
    print_grid(pdf, page)
    # print the lines of the page.
    HPDF_Page_SetLineWidth(page, 1)
    HPDF_Page_Rectangle(page, 50, 50, HPDF_Page_GetWidth(page) - 100,
                        HPDF_Page_GetHeight(page) - 110)
    HPDF_Page_Stroke(page)
    # print the title of the page (with positioning center).
    HPDF_Page_SetFontAndSize(page, font, 24)
    tw = HPDF_Page_TextWidth(page, page_title)
    HPDF_Page_BeginText(page)
    HPDF_Page_TextOut(page, (HPDF_Page_GetWidth(page) - tw) / 2,
                      HPDF_Page_GetHeight(page) - 50, page_title)
    HPDF_Page_EndText(page)
    HPDF_Page_BeginText(page)
    HPDF_Page_MoveTextPos(page, 60, HPDF_Page_GetHeight(page) - 60)
    # font size: show the sample at 8pt, 12pt, 18pt, ... up to <60pt.
    fsize = 8
    while (fsize < 60):
        # set style and size of font.
        HPDF_Page_SetFontAndSize(page, font, fsize)
        # set the position of the text.
        HPDF_Page_MoveTextPos(page, 0, -5 - fsize)
        # measure the number of characters which included in the page.
        buf = samp_text
        length = HPDF_Page_MeasureText(page, samp_text,
                                       HPDF_Page_GetWidth(page) - 120, HPDF_FALSE, NULL)
        # truncate the text.
        # NOTE(review): '%*s' only left-pads to *length* -- it does not
        # truncate like the C demo's strncpy; presumably
        # samp_text[:int(length)] was intended.  TODO confirm against
        # the original text_demo.c.
        buf = '%*s\0' % (int(length), buf)
        HPDF_Page_ShowText(page, buf)
        # print the description.
        HPDF_Page_MoveTextPos(page, 0, -10)
        HPDF_Page_SetFontAndSize(page, font, 8)
        buf = "Fontsize=%.0f" % fsize
        HPDF_Page_ShowText(page, buf)
        fsize *= 1.5
    # font color
    HPDF_Page_SetFontAndSize(page, font, 8)
    HPDF_Page_MoveTextPos(page, 0, -30)
    HPDF_Page_ShowText(page, "Font color")
    HPDF_Page_SetFontAndSize(page, font, 18)
    HPDF_Page_MoveTextPos(page, 0, -20)
    length = len(samp_text)
    # red -> green gradient, one character at a time.
    for i in range(length):
        # buf is a [char, NUL] pair; presumably the ctypes wrapper accepts
        # this 2-element list as a C string -- TODO confirm.
        buf = [None, None]
        r = i / float(length)
        g = 1 - (i / float(length))
        buf[0] = samp_text[i]
        buf[1] = '\0'
        HPDF_Page_SetRGBFill(page, r, g, 0.0)
        HPDF_Page_ShowText(page, buf)
    HPDF_Page_MoveTextPos(page, 0, -25)
    # red -> blue gradient.
    for i in range(length):
        buf = [None, None]
        r = i / float(length)
        b = 1 - (i / float(length))
        buf[0] = samp_text[i]
        buf[1] = '\0'
        HPDF_Page_SetRGBFill(page, r, 0.0, b)
        HPDF_Page_ShowText(page, buf)
    HPDF_Page_MoveTextPos(page, 0, -25)
    # blue -> green gradient.
    for i in range(length):
        buf = [None, None]
        b = i / float(length)
        g = 1 - (i / float(length))
        buf[0] = samp_text[i]
        buf[1] = '\0'
        HPDF_Page_SetRGBFill(page, 0.0, g, b)
        HPDF_Page_ShowText(page, buf)
    HPDF_Page_EndText(page)
    ypos = 450
    #
    # Font rendering mode
    #
    HPDF_Page_SetFontAndSize(page, font, 32)
    HPDF_Page_SetRGBFill(page, 0.5, 0.5, 0.0)
    HPDF_Page_SetLineWidth(page, 1.5)
    # PDF_FILL
    show_description(page, 60, ypos,
                     "RenderingMode=PDF_FILL")
    HPDF_Page_SetTextRenderingMode(page, HPDF_FILL)
    HPDF_Page_BeginText(page)
    HPDF_Page_TextOut(page, 60, ypos, "ABCabc123")
    HPDF_Page_EndText(page)
    # PDF_STROKE
    show_description(page, 60, ypos - 50,
                     "RenderingMode=PDF_STROKE")
    HPDF_Page_SetTextRenderingMode(page, HPDF_STROKE)
    HPDF_Page_BeginText(page)
    HPDF_Page_TextOut(page, 60, ypos - 50, "ABCabc123")
    HPDF_Page_EndText(page)
    # PDF_FILL_THEN_STROKE
    show_description(page, 60, ypos - 100,
                     "RenderingMode=PDF_FILL_THEN_STROKE")
    HPDF_Page_SetTextRenderingMode(page, HPDF_FILL_THEN_STROKE)
    HPDF_Page_BeginText(page)
    HPDF_Page_TextOut(page, 60, ypos - 100, "ABCabc123")
    HPDF_Page_EndText(page)
    # PDF_FILL_CLIPPING: text sets the clip path, stripes show through it.
    show_description(page, 60, ypos - 150,
                     "RenderingMode=PDF_FILL_CLIPPING")
    HPDF_Page_GSave(page)
    HPDF_Page_SetTextRenderingMode(page, HPDF_FILL_CLIPPING)
    HPDF_Page_BeginText(page)
    HPDF_Page_TextOut(page, 60, ypos - 150, "ABCabc123")
    HPDF_Page_EndText(page)
    show_stripe_pattern(page, 60, ypos - 150)
    HPDF_Page_GRestore(page)
    # PDF_STROKE_CLIPPING
    show_description(page, 60, ypos - 200,
                     "RenderingMode=PDF_STROKE_CLIPPING")
    HPDF_Page_GSave(page)
    HPDF_Page_SetTextRenderingMode(page, HPDF_STROKE_CLIPPING)
    HPDF_Page_BeginText(page)
    HPDF_Page_TextOut(page, 60, ypos - 200, "ABCabc123")
    HPDF_Page_EndText(page)
    show_stripe_pattern(page, 60, ypos - 200)
    HPDF_Page_GRestore(page)
    # PDF_FILL_STROKE_CLIPPING
    show_description(page, 60, ypos - 250,
                     "RenderingMode=PDF_FILL_STROKE_CLIPPING")
    HPDF_Page_GSave(page)
    HPDF_Page_SetTextRenderingMode(page, HPDF_FILL_STROKE_CLIPPING)
    HPDF_Page_BeginText(page)
    HPDF_Page_TextOut(page, 60, ypos - 250, "ABCabc123")
    HPDF_Page_EndText(page)
    show_stripe_pattern(page, 60, ypos - 250)
    HPDF_Page_GRestore(page)
    # Reset text attributes
    HPDF_Page_SetTextRenderingMode(page, HPDF_FILL)
    HPDF_Page_SetRGBFill(page, 0, 0, 0)
    HPDF_Page_SetFontAndSize(page, font, 30)
    #
    # Rotating text
    #
    angle1 = 30;  # A rotation of 30 degrees.
    # NOTE(review): under Python 2 integer division 30 / 180 == 0, so rad1
    # is 0.0 and no rotation is applied; 30.0 / 180 was presumably
    # intended.  The skew angles below have the same issue.
    rad1 = angle1 / 180 * 3.141592;  # Calcurate the radian value.
    show_description(page, 320, ypos - 60, "Rotating text")
    HPDF_Page_BeginText(page)
    HPDF_Page_SetTextMatrix(page, cos(rad1), sin(rad1), -sin(rad1), cos(rad1),
                            330, ypos - 60)
    HPDF_Page_ShowText(page, "ABCabc123")
    HPDF_Page_EndText(page)
    #
    # Skewing text.
    #
    show_description(page, 320, ypos - 120, "Skewing text")
    HPDF_Page_BeginText(page)
    angle1 = 10
    angle2 = 20
    rad1 = angle1 / 180 * 3.141592
    rad2 = angle2 / 180 * 3.141592
    HPDF_Page_SetTextMatrix(page, 1, tan(rad1), tan(rad2), 1, 320, ypos - 120)
    HPDF_Page_ShowText(page, "ABCabc123")
    HPDF_Page_EndText(page)
    #
    # scaling text (X direction)
    #
    show_description(page, 320, ypos - 175, "Scaling text (X direction)")
    HPDF_Page_BeginText(page)
    HPDF_Page_SetTextMatrix(page, 1.5, 0, 0, 1, 320, ypos - 175)
    HPDF_Page_ShowText(page, "ABCabc12")
    HPDF_Page_EndText(page)
    #
    # scaling text (Y direction)
    #
    show_description(page, 320, ypos - 250, "Scaling text (Y direction)")
    HPDF_Page_BeginText(page)
    HPDF_Page_SetTextMatrix(page, 1, 0, 0, 2, 320, ypos - 250)
    HPDF_Page_ShowText(page, "ABCabc123")
    HPDF_Page_EndText(page)
    #
    # char spacing, word spacing
    #
    show_description(page, 60, 140, "char-spacing 0")
    show_description(page, 60, 100, "char-spacing 1.5")
    show_description(page, 60, 60, "char-spacing 1.5, word-spacing 2.5")
    HPDF_Page_SetFontAndSize(page, font, 20)
    HPDF_Page_SetRGBFill(page, 0.1, 0.3, 0.1)
    ## char-spacing 0
    HPDF_Page_BeginText(page)
    HPDF_Page_TextOut(page, 60, 140, samp_text2)
    HPDF_Page_EndText(page)
    # char-spacing 1.5
    HPDF_Page_SetCharSpace(page, 1.5)
    HPDF_Page_BeginText(page)
    HPDF_Page_TextOut(page, 60, 100, samp_text2)
    HPDF_Page_EndText(page)
    # char-spacing 1.5, word-spacing 2.5
    HPDF_Page_SetWordSpace(page, 2.5)
    HPDF_Page_BeginText(page)
    HPDF_Page_TextOut(page, 60, 60, samp_text2)
    HPDF_Page_EndText(page)
    # save the document to a file
    HPDF_SaveToFile(pdf, fname)
    # clean up
    HPDF_Free(pdf)
    return 0
main()
###
## * << Haru Free PDF Library 2.0.0 >> -- font_demo.c
## *
## * Copyright (c) 1999-2006 Takeshi Kanno <takeshi_kanno@est.hi-ho.ne.jp>
## *
## * Permission to use, copy, modify, distribute and sell this software
## * and its documentation for any purpose is hereby granted without fee,
## * provided that the above copyright notice appear in all copies and
## * that both that copyright notice and this permission notice appear
## * in supporting documentation.
## * It is provided "as is" without express or implied warranty.
## *
##
## port to python by Li Jun
## http://groups.google.com/group/pythoncia
import os, sys
from ctypes import *
up=2
def setlibpath(up):
    """Append the directory *up* levels above this file to sys.path.

    The original built the path with a literal '\..' suffix, which only
    works with Windows path separators; os.pardir keeps it portable.
    """
    import sys
    here = os.path.split(os.path.realpath(__file__))[0]
    path = os.path.normpath(os.path.join(here, *([os.pardir] * up)))
    if path not in sys.path:
        sys.path.append(path)
setlibpath(up)
from haru import *
from haru.c_func import *
from math import *
global pdf
@HPDF_Error_Handler(None, HPDF_UINT, HPDF_UINT, c_void_p)
def error_handler(error_no, detail_no, user_data):
    """libharu error callback: print the error code, free the document, exit.

    Unlike the other demos in this collection this variant prints the raw
    hex error number instead of an error_detail lookup.
    """
    global pdf
    printf("ERROR: error_no=%04X, detail_no=%u\n", error_no,
           detail_no)
    HPDF_Free(pdf)
    sys.exit(1)
# The 14 standard Type1 base fonts sampled by main(), one line per font.
font_list = [
    "Courier",
    "Courier-Bold",
    "Courier-Oblique",
    "Courier-BoldOblique",
    "Helvetica",
    "Helvetica-Bold",
    "Helvetica-Oblique",
    "Helvetica-BoldOblique",
    "Times-Roman",
    "Times-Bold",
    "Times-Italic",
    "Times-BoldItalic",
    "Symbol",
    "ZapfDingbats",
]
def main():
    """Build font_demo.pdf: a labelled sample line in each base Type1 font."""
    global pdf
    page_title = "Font Demo"
    fname = os.path.realpath(sys.argv[0])
    fname = fname[:fname.rfind('.')] + '.pdf'
    pdf = HPDF_New(error_handler, NULL)
    if (not pdf):
        printf("error: cannot create PdfDoc object\n")
        return 1
    # Add a new page object.
    page = HPDF_AddPage(pdf)
    height = HPDF_Page_GetHeight(page)
    width = HPDF_Page_GetWidth(page)
    # Print the lines of the page.
    HPDF_Page_SetLineWidth(page, 1)
    HPDF_Page_Rectangle(page, 50, 50, width - 100, height - 110)
    HPDF_Page_Stroke(page)
    # Print the title of the page (with positioning center).
    def_font = HPDF_GetFont(pdf, "Helvetica", NULL)
    HPDF_Page_SetFontAndSize(page, def_font, 24)
    tw = HPDF_Page_TextWidth(page, page_title)
    HPDF_Page_BeginText(page)
    HPDF_Page_TextOut(page, (width - tw) / 2, height - 50, page_title)
    HPDF_Page_EndText(page)
    # output subtitle.  ("Standerd" typo is in the original output string.)
    HPDF_Page_BeginText(page)
    HPDF_Page_SetFontAndSize(page, def_font, 16)
    HPDF_Page_TextOut(page, 60, height - 80, "<Standerd Type1 fonts samples>")
    HPDF_Page_EndText(page)
    HPDF_Page_BeginText(page)
    HPDF_Page_MoveTextPos(page, 60, height - 105)
    # One 9pt label plus one 20pt sample line per standard font.
    for i in font_list:
        samp_text = "abcdefgABCDEFG12345!#$%&+-@?"
        #font_name = HPDF_LoadTTFontFromFile (pdf, "c:/winnt/fonts/arial.ttf", HPDF_TRUE);
        #font_name='arial.ttf'
        #font = HPDF_GetFont (pdf, font_name, "CP1250");
        #font = HPDF_GetFont (pdf, "Helvetica", NULL)
        font = HPDF_GetFont(pdf, i, NULL)
        # print a label of text
        HPDF_Page_SetFontAndSize(page, def_font, 9)
        HPDF_Page_ShowText(page, i)
        HPDF_Page_MoveTextPos(page, 0, -18)
        # print a sample text.
        HPDF_Page_SetFontAndSize(page, font, 20)
        HPDF_Page_ShowText(page, samp_text)
        HPDF_Page_MoveTextPos(page, 0, -20)
    HPDF_Page_EndText(page)
    HPDF_SaveToFile(pdf, fname)
    # clean up
    HPDF_Free(pdf)
    return 0
main()
###
## * << Haru Free PDF Library 2.0.0 >> -- encoding_list.c
## *
## * Copyright (c) 1999-2006 Takeshi Kanno <takeshi_kanno@est.hi-ho.ne.jp>
## *
## * Permission to use, copy, modify, distribute and sell this software
## * and its documentation for any purpose is hereby granted without fee,
## * provided that the above copyright notice appear in all copies and
## * that both that copyright notice and this permission notice appear
## * in supporting documentation.
## * It is provided "as is" without express or implied warranty.
## *
##
## port to python by Li Jun
## http://groups.google.com/group/pythoncia
import os, sys
from ctypes import *
up=2
def setlibpath(up):
    """Append the directory *up* levels above this file to sys.path.

    The original built the path with a literal '\..' suffix, which only
    works with Windows path separators; os.pardir keeps it portable.
    """
    import sys
    here = os.path.split(os.path.realpath(__file__))[0]
    path = os.path.normpath(os.path.join(here, *([os.pardir] * up)))
    if path not in sys.path:
        sys.path.append(path)
setlibpath(up)
from haru import *
from haru.c_func import *
from haru.hpdf_errorcode import *
@HPDF_Error_Handler(None, HPDF_UINT, HPDF_UINT, c_void_p)
def error_handler(error_no, detail_no, user_data):
    """libharu error callback: print the error, free the document, exit.

    Wrapped as a ctypes callback by the project's HPDF_Error_Handler
    decorator and passed to HPDF_NewEx in main().
    """
    global pdf
    printf("ERROR: %s, detail_no=%u\n", error_detail[error_no],
           detail_no)
    HPDF_Free(pdf)
    sys.exit(1)
# Page geometry (points) for the 16x16 code-table pages.
PAGE_WIDTH = 420
PAGE_HEIGHT = 400
CELL_WIDTH = 20
CELL_HEIGHT = 20
CELL_HEADER = 10
def draw_graph(page):
    """Rule the character-table grid and write hex row/column headers."""
    HPDF_Page_SetLineWidth(page, 0.5)
    # Vertical rules; interior columns get a hex label below the top margin.
    for col in range(18):
        x = 40 + col * CELL_WIDTH
        HPDF_Page_MoveTo(page, x, PAGE_HEIGHT - 60)
        HPDF_Page_LineTo(page, x, 40)
        HPDF_Page_Stroke(page)
        if 0 < col <= 16:
            HPDF_Page_BeginText(page)
            HPDF_Page_MoveTextPos(page, x + 5, PAGE_HEIGHT - 75)
            HPDF_Page_ShowText(page, "%X" % (col - 1))
            HPDF_Page_EndText(page)
    # Horizontal rules; the lower rows get a hex label on the left.
    for row in range(16):
        y = 40 + row * CELL_HEIGHT
        HPDF_Page_MoveTo(page, 40, y)
        HPDF_Page_LineTo(page, PAGE_WIDTH - 40, y)
        HPDF_Page_Stroke(page)
        if row < 14:
            HPDF_Page_BeginText(page)
            HPDF_Page_MoveTextPos(page, 45, y + 5)
            HPDF_Page_ShowText(page, "%X" % (15 - row))
            HPDF_Page_EndText(page)
def draw_fonts(page):
    """Write each code point 0x20..0xFF into its grid cell (current font)."""
    HPDF_Page_BeginText(page)
    # Draw all character from 0x20 to 0xFF to the canvas.
    for i in range(1, 17):
        for j in range(1, 17):
            # buf is a [code, NUL] pair; presumably the ctypes wrapper
            # converts this 2-element list to a C string -- TODO confirm.
            buf = [None, None]
            y = PAGE_HEIGHT - 55 - ((i - 1) * CELL_HEIGHT)
            x = j * CELL_WIDTH + 50
            buf[1] = 0x00
            buf[0] = (i - 1) * 16 + (j - 1)
            # Skip control characters below 0x20.
            if (buf[0] >= 32):
                # Centre the glyph horizontally in its cell.
                d = x - HPDF_Page_TextWidth(page, buf) / 2
                HPDF_Page_TextOut(page, d, y, buf)
    HPDF_Page_EndText(page)
def main():
    """Build encoding_list.pdf: one 16x16 code-table page per encoding."""
    encodings = [
        "StandardEncoding",
        "MacRomanEncoding",
        "WinAnsiEncoding",
        "ISO8859-2",
        "ISO8859-3",
        "ISO8859-4",
        "ISO8859-5",
        "ISO8859-9",
        "ISO8859-10",
        "ISO8859-13",
        "ISO8859-14",
        "ISO8859-15",
        "ISO8859-16",
        "CP1250",
        "CP1251",
        "CP1252",
        "CP1254",
        "CP1257",
        "KOI8-R",
        "Symbol-Set",
        "ZapfDingbats-Set",
        NULL  # sentinel ending the C-style while loop below
    ]
    # NOTE(review): no 'global pdf' here, unlike the other demos, so
    # error_handler's global lookup will not see this document -- confirm.
    pdf = HPDF_NewEx(error_handler, NULL, NULL, 0, NULL)
    if (not pdf):
        printf("error: cannot create PdfDoc object\n")
        return 1
    fname = os.path.realpath(sys.argv[0])
    fname = fname[:fname.rfind('.')] + '.pdf'
    # set compression mode
    HPDF_SetCompressionMode(pdf, HPDF_COMP_ALL)
    # Set page mode to use outlines.
    HPDF_SetPageMode(pdf, HPDF_PAGE_MODE_USE_OUTLINE)
    # get default font
    font = HPDF_GetFont(pdf, "Helvetica", NULL)
    # load font object
    font_name = HPDF_LoadType1FontFromFile(pdf, "type1/a010013l.afm",
                                           "type1/a010013l.pfb")
    # create outline root.
    root = HPDF_CreateOutline(pdf, NULL, "Encoding list", NULL)
    HPDF_Outline_SetOpened(root, HPDF_TRUE)
    i = 0
    while (encodings[i]):
        page = HPDF_AddPage(pdf)
        HPDF_Page_SetWidth(page, PAGE_WIDTH)
        HPDF_Page_SetHeight(page, PAGE_HEIGHT)
        # One outline entry per encoding, jumping to its page.
        outline = HPDF_CreateOutline(pdf, root, encodings[i], NULL)
        dst = HPDF_Page_CreateDestination(page)
        HPDF_Destination_SetXYZ(dst, 0, HPDF_Page_GetHeight(page), 1)
        # HPDF_Destination_SetFitB(dst);
        HPDF_Outline_SetDestination(outline, dst)
        HPDF_Page_SetFontAndSize(page, font, 15)
        draw_graph(page)
        HPDF_Page_BeginText(page)
        HPDF_Page_SetFontAndSize(page, font, 20)
        HPDF_Page_MoveTextPos(page, 40, PAGE_HEIGHT - 50)
        HPDF_Page_ShowText(page, encodings[i])
        HPDF_Page_ShowText(page, " Encoding")
        HPDF_Page_EndText(page)
        # Symbol/ZapfDingbats carry built-in encodings; the other pages use
        # the loaded Type1 font with the named code page.
        if encodings[i] == "Symbol-Set":
            font2 = HPDF_GetFont(pdf, "Symbol", NULL)
        elif encodings[i] == "ZapfDingbats-Set":
            font2 = HPDF_GetFont(pdf, "ZapfDingbats", NULL)
        else:
            font2 = HPDF_GetFont(pdf, font_name, encodings[i])
        HPDF_Page_SetFontAndSize(page, font2, 14)
        draw_fonts(page)
        i += 1
    # save the document to a file
    HPDF_SaveToFile(pdf, fname)
    # clean up
    HPDF_Free(pdf)
    return 0
main()
###
## * << Haru Free PDF Library 2.0.0 >> -- ttfont_demo_jp.c
## *
## * Copyright (c) 1999-2006 Takeshi Kanno <takeshi_kanno@est.hi-ho.ne.jp>
## *
## * Permission to use, copy, modify, distribute and sell this software
## * and its documentation for any purpose is hereby granted without fee,
## * provided that the above copyright notice appear in all copies and
## * that both that copyright notice and this permission notice appear
## * in supporting documentation.
## * It is provided "as is" without express or implied warranty.
## *
##
## port to python by Li Jun
## http://groups.google.com/group/pythoncia
import os, sys
from ctypes import *
up=2
def setlibpath(up):
    """Append the directory *up* levels above this file to sys.path.

    The original built the path with a literal '\..' suffix, which only
    works with Windows path separators; os.pardir keeps it portable.
    """
    import sys
    here = os.path.split(os.path.realpath(__file__))[0]
    path = os.path.normpath(os.path.join(here, *([os.pardir] * up)))
    if path not in sys.path:
        sys.path.append(path)
setlibpath(up)
from haru import *
from haru.c_func import *
from haru.hpdf_errorcode import *
@HPDF_Error_Handler(None, HPDF_UINT, HPDF_UINT, c_void_p)
def error_handler(error_no, detail_no, user_data):
    """libharu error callback: print the error, free the document, exit.

    Wrapped as a ctypes callback by the project's HPDF_Error_Handler
    decorator and passed to HPDF_New in main().
    """
    global pdf
    printf("ERROR: %s, detail_no=%u\n", error_detail[error_no],
           detail_no)
    HPDF_Free(pdf)
    sys.exit(1)
def main():
    """Build a PDF showing a CP936 TrueType font at several sizes.

    Usage: ttfont_demo_cn <ttf-file> [-E]
           ttfont_demo_cn <ttc-file> <index> [-E]
    A trailing -E embeds the font in the document.
    """
    global pdf
    if (len(sys.argv) < 2):
        printf("ttfont_demo_cn <ttf-font-filename> [-E]\n")
        printf("ttfont_demo_cn <ttc-font-filename> <index> [-E]\n")
        print
        printf(r"for example, ttfont_demo_cn.py c:\winnt\fonts\simfang.ttf -E")
        return 1
    try:
        f = open("mbtext/cp936.txt", "rb")
    except:
        printf("error: cannot open 'mbtext/cp936.txt'\n")
        return 1
    # Raw CP936-encoded sample bytes.
    SAMP_TXT = f.read(2048)
    f.close()
    fname = os.path.realpath(sys.argv[0])
    fname = fname[:fname.rfind('.')] + '.pdf'
    pdf = HPDF_New(error_handler, NULL)
    if (not pdf):
        printf("error: cannot create PdfDoc object\n")
        return 1
    # declaration for using Chinese (simplified) encodings.
    HPDF_UseCNSEncodings(pdf)
    HPDF_SetCompressionMode(pdf, HPDF_COMP_ALL)
    # load the ttc/ttf file; a trailing -E argument requests embedding.
    if len(sys.argv) == 4 and sys.argv[3] == "-E":
        detail_font_name = HPDF_LoadTTFontFromFile2(pdf, sys.argv[1],
                                                    int(sys.argv[2]), HPDF_TRUE)
    elif len(sys.argv) == 3 and sys.argv[2] == "-E":
        detail_font_name = HPDF_LoadTTFontFromFile(pdf, sys.argv[1], HPDF_TRUE)
    elif len(sys.argv) == 3:
        detail_font_name = HPDF_LoadTTFontFromFile2(pdf, sys.argv[1],
                                                    int(sys.argv[2]), HPDF_FALSE)
    else:
        detail_font_name = HPDF_LoadTTFontFromFile(pdf, sys.argv[1], HPDF_FALSE)
    # add a new page object.
    page = HPDF_AddPage(pdf)
    title_font = HPDF_GetFont(pdf, "Helvetica", NULL)
    detail_font = HPDF_GetFont(pdf, detail_font_name, "GB-EUC-H")
    HPDF_Page_SetFontAndSize(page, title_font, 10)
    HPDF_Page_BeginText(page)
    # move the position of the text to top of the page.
    HPDF_Page_MoveTextPos(page, 10, 190)
    HPDF_Page_ShowText(page, detail_font_name)
    HPDF_Page_ShowText(page, " (")
    HPDF_Page_ShowText(page, HPDF_Font_GetEncodingName(detail_font))
    HPDF_Page_ShowText(page, ")")
    HPDF_Page_SetFontAndSize(page, detail_font, 15)
    HPDF_Page_MoveTextPos(page, 10, -20)
    HPDF_Page_ShowText(page, "abcdefghijklmnopqrstuvwxyz")
    HPDF_Page_MoveTextPos(page, 0, -20)
    HPDF_Page_ShowText(page, "ABCDEFGHIJKLMNOPQRSTUVWXYZ")
    HPDF_Page_MoveTextPos(page, 0, -20)
    HPDF_Page_ShowText(page, "1234567890")
    HPDF_Page_MoveTextPos(page, 0, -20)
    # the sample text at increasing sizes.
    HPDF_Page_SetFontAndSize(page, detail_font, 10)
    HPDF_Page_ShowText(page, SAMP_TXT)
    HPDF_Page_MoveTextPos(page, 0, -18)
    HPDF_Page_SetFontAndSize(page, detail_font, 16)
    HPDF_Page_ShowText(page, SAMP_TXT)
    HPDF_Page_MoveTextPos(page, 0, -27)
    HPDF_Page_SetFontAndSize(page, detail_font, 23)
    HPDF_Page_ShowText(page, SAMP_TXT)
    HPDF_Page_MoveTextPos(page, 0, -36)
    HPDF_Page_SetFontAndSize(page, detail_font, 30)
    HPDF_Page_ShowText(page, SAMP_TXT)
    HPDF_Page_MoveTextPos(page, 0, -36)
    # size the page to fit the widest (30pt) sample line.
    pw = HPDF_Page_TextWidth(page, SAMP_TXT)
    page_height = 210
    page_width = pw + 40
    HPDF_Page_SetWidth(page, page_width)
    HPDF_Page_SetHeight(page, page_height)
    # finish to print text.
    HPDF_Page_EndText(page)
    # rules above and below the per-size samples.
    HPDF_Page_SetLineWidth(page, 0.5)
    HPDF_Page_MoveTo(page, 10, page_height - 25)
    HPDF_Page_LineTo(page, page_width - 10, page_height - 25)
    HPDF_Page_Stroke(page)
    HPDF_Page_MoveTo(page, 10, page_height - 85)
    HPDF_Page_LineTo(page, page_width - 10, page_height - 85)
    HPDF_Page_Stroke(page)
    HPDF_SaveToFile(pdf, fname)
    # clean up
    HPDF_Free(pdf)
    return 0
main()
###
## * << Haru Free PDF Library 2.0.0 >> -- chfont_demo.c
## *
## * Copyright (c) 1999-2006 Takeshi Kanno <takeshi_kanno@est.hi-ho.ne.jp>
## *
## * Permission to use, copy, modify, distribute and sell this software
## * and its documentation for any purpose is hereby granted without fee,
## * provided that the above copyright notice appear in all copies and
## * that both that copyright notice and this permission notice appear
## * in supporting documentation.
## * It is provided "as is" without express or implied warranty.
## *
##
## port to python by Li Jun
## http://groups.google.com/group/pythoncia
import os, sys
from ctypes import *
up=2
def setlibpath(up):
    """Append the directory *up* levels above this file to sys.path.

    The original built the path with a literal '\..' suffix, which only
    works with Windows path separators; os.pardir keeps it portable.
    """
    import sys
    here = os.path.split(os.path.realpath(__file__))[0]
    path = os.path.normpath(os.path.join(here, *([os.pardir] * up)))
    if path not in sys.path:
        sys.path.append(path)
setlibpath(up)
from haru import *
from haru.c_func import *
from haru.hpdf_errorcode import *
from grid_sheet import *
@HPDF_Error_Handler(None, HPDF_UINT, HPDF_UINT, c_void_p)
def error_handler(error_no, detail_no, user_data):
    """libharu error callback: print the error, free the document, exit.

    Wrapped as a ctypes callback by the project's HPDF_Error_Handler
    decorator and passed to HPDF_New in main().
    """
    global pdf
    printf("ERROR: %s, detail_no=%u\n", error_detail[error_no],
           detail_no)
    HPDF_Free(pdf)
    sys.exit(1)
def main():
    """Build chfont_demo.pdf mixing CP936 (Chinese) and CP932 (Japanese) fonts.

    Usage: chfont_demo <cp936-ttc-file> <cp936-index>
                       <cp932-ttc-file> <cp932-index>
    Returns 0 on success, 1 on usage/setup errors.
    """
    global pdf
    # Four arguments are required (sys.argv[1]..sys.argv[4]); the original
    # checked len < 4, which let a 3-argument call crash on sys.argv[4].
    if (len(sys.argv) < 5):
        printf("chfont_demo <cp936-ttc-font-file-name> "
               "<cp936-index> <cp932-ttc-font-file-name> <cp932-index>\n")
        return 1
    # open() raises on failure, so the original C-style 'if not file' test
    # could never fire; catch the exception and report instead.
    fname = "mbtext/%s" % "cp932.txt"
    try:
        cp932 = open(fname, "rb")
    except IOError:
        printf("error: cannot open cp932.txt\n")
        return 1
    fname = "mbtext/%s" % "cp936.txt"
    try:
        cp936 = open(fname, "rb")
    except IOError:
        printf("error: cannot open cp936.txt\n")
        return 1
    fname = os.path.realpath(sys.argv[0])
    fname = fname[:fname.rfind('.')] + '.pdf'
    pdf = HPDF_New(error_handler, NULL)
    if (not pdf):
        printf("error: cannot create PdfDoc object\n")
        return 1
    HPDF_SetCompressionMode(pdf, HPDF_COMP_ALL)
    HPDF_UseJPEncodings(pdf)
    HPDF_UseCNSEncodings(pdf)
    fcp936_name = HPDF_LoadTTFontFromFile2(pdf, sys.argv[1], int(sys.argv[2]),
                                           HPDF_TRUE)
    fcp932_name = HPDF_LoadTTFontFromFile2(pdf, sys.argv[3], int(sys.argv[4]),
                                           HPDF_TRUE)
    # add a new page object.
    page = HPDF_AddPage(pdf)
    HPDF_Page_SetHeight(page, 300)
    HPDF_Page_SetWidth(page, 550)
    fcp936 = HPDF_GetFont(pdf, fcp936_name, "GBK-EUC-H")
    fcp932 = HPDF_GetFont(pdf, fcp932_name, "90ms-RKSJ-H")
    print_grid(pdf, page)
    HPDF_Page_SetTextLeading(page, 20)
    HPDF_Page_BeginText(page)
    HPDF_Page_MoveTextPos(page, 50, 250)
    HPDF_Page_SetTextLeading(page, 25)
    # Alternate a Chinese chunk and a Japanese chunk per line.  The
    # original read cp936 twice and never read the opened cp932 file,
    # showing Chinese bytes with the Japanese font; read cp932 for the
    # Japanese half as the C demo does.
    buf = cp936.read(1024)
    while buf:
        HPDF_Page_SetFontAndSize(page, fcp936, 18)
        HPDF_Page_ShowText(page, '%s\0' % buf)
        jbuf = cp932.read(1024)
        if jbuf:
            HPDF_Page_SetFontAndSize(page, fcp932, 18)
            HPDF_Page_ShowText(page, '%s\0' % jbuf)
        HPDF_Page_MoveToNextLine(page)
        buf = cp936.read(1024)
    # save the document to a file
    HPDF_SaveToFile(pdf, fname)
    # clean up
    HPDF_Free(pdf)
    cp936.close()
    cp932.close()
    return 0
main()
###
## * << Haru Free PDF Library 2.0.0 >> -- jpfont_demo.c
## *
## * Copyright (c) 1999-2006 Takeshi Kanno <takeshi_kanno@est.hi-ho.ne.jp>
## *
## * Permission to use, copy, modify, distribute and sell this software
## * and its documentation for any purpose is hereby granted without fee,
## * provided that the above copyright notice appear in all copies and
## * that both that copyright notice and this permission notice appear
## * in supporting documentation.
## * It is provided "as is" without express or implied warranty.
## *
##
## port to python by Li Jun
## http://groups.google.com/group/pythoncia
import os, sys
from ctypes import *
up=2
def setlibpath(up):
    """Append the directory *up* levels above this file to sys.path.

    The original climbed directories with the literal string '\\..',
    which only works where the backslash is a path separator (Windows);
    os.path.join with os.pardir is portable and warning-free.
    """
    import sys
    base = os.path.dirname(os.path.realpath(__file__))
    path = os.path.normpath(os.path.join(base, *([os.pardir] * up)))
    if path not in sys.path:
        sys.path.append(path)
setlibpath(up)
from haru import *
from haru.c_func import *
from haru.hpdf_errorcode import *
@HPDF_Error_Handler(None, HPDF_UINT, HPDF_UINT, c_void_p)
def error_handler (error_no, detail_no, user_data):
    """libharu error callback: print the error, free the document, exit.

    Installed via HPDF_New; the library calls it with numeric error and
    detail codes plus an opaque user pointer (unused here).  Exits the
    process with status 1 -- it never returns to the caller.
    """
    global pdf
    printf ("ERROR: %s, detail_no=%u\n", error_detail[error_no],
                detail_no)
    HPDF_Free (pdf)
    sys.exit(1)
def main():
    """Build jpfont_demo.pdf: one page per MS Japanese font variant.

    Loads a Shift-JIS sample text and, for each of the 16 MS-(P)Mincyo /
    MS-(P)Gothic style combinations, renders the Latin alphabet, digits
    and the sample text at several sizes, with one outline entry per page.

    Returns:
        0 on success, 1 when the sample text cannot be read or the PDF
        document cannot be created (the C demo's exit-code convention).
    """
    global pdf
    PAGE_HEIGHT = 210
    # The sample text is Shift-JIS encoded, so read it as raw bytes.
    # (The original used a bare ``except:`` and left the file unclosed on
    # the error path; ``with`` + a narrow exception fixes both.)
    try:
        with open("mbtext/sjis.txt", "rb") as f:
            samp_text = f.read(2048)
    except IOError:
        printf("error: cannot open 'mbtext/sjis.txt'\n")
        return 1
    # Name the output after this script, swapping the extension for .pdf.
    fname = os.path.realpath(sys.argv[0])
    fname = fname[:fname.rfind('.')] + '.pdf'
    pdf = HPDF_New(error_handler, NULL)
    if not pdf:
        printf("error: cannot create PdfDoc object\n")
        return 1
    # configure pdf-document to be compressed.
    HPDF_SetCompressionMode(pdf, HPDF_COMP_ALL)
    # declaration for using Japanese font, encoding.
    HPDF_UseJPEncodings(pdf)
    HPDF_UseJPFonts(pdf)
    # The 16 font/encoding pairs shown, in page order.  Proportional (P)
    # faces use the 90msp encoding, fixed-pitch faces use 90ms.
    font_specs = [
        ("MS-Mincyo", "90ms-RKSJ-H"),
        ("MS-Mincyo,Bold", "90ms-RKSJ-H"),
        ("MS-Mincyo,Italic", "90ms-RKSJ-H"),
        ("MS-Mincyo,BoldItalic", "90ms-RKSJ-H"),
        ("MS-PMincyo", "90msp-RKSJ-H"),
        ("MS-PMincyo,Bold", "90msp-RKSJ-H"),
        ("MS-PMincyo,Italic", "90msp-RKSJ-H"),
        ("MS-PMincyo,BoldItalic", "90msp-RKSJ-H"),
        ("MS-Gothic", "90ms-RKSJ-H"),
        ("MS-Gothic,Bold", "90ms-RKSJ-H"),
        ("MS-Gothic,Italic", "90ms-RKSJ-H"),
        ("MS-Gothic,BoldItalic", "90ms-RKSJ-H"),
        ("MS-PGothic", "90msp-RKSJ-H"),
        ("MS-PGothic,Bold", "90msp-RKSJ-H"),
        ("MS-PGothic,Italic", "90msp-RKSJ-H"),
        ("MS-PGothic,BoldItalic", "90msp-RKSJ-H"),
    ]
    detail_font = [HPDF_GetFont(pdf, name, enc) for name, enc in font_specs]
    # Set page mode to use outlines.
    HPDF_SetPageMode(pdf, HPDF_PAGE_MODE_USE_OUTLINE)
    # create outline root.
    root = HPDF_CreateOutline(pdf, NULL, "JP font demo", NULL)
    HPDF_Outline_SetOpened(root, HPDF_TRUE)
    for fnt in detail_font:
        # add a new page object.
        page = HPDF_AddPage(pdf)
        # create an outline entry named after this page's font.
        outline = HPDF_CreateOutline(pdf, root,
                    HPDF_Font_GetFontName(fnt), NULL)
        dst = HPDF_Page_CreateDestination(page)
        HPDF_Outline_SetDestination(outline, dst)
        title_font = HPDF_GetFont(pdf, "Helvetica", NULL)
        HPDF_Page_SetFontAndSize(page, title_font, 10)
        HPDF_Page_BeginText(page)
        # move the position of the text to top of the page.
        HPDF_Page_MoveTextPos(page, 10, 190)
        HPDF_Page_ShowText(page, HPDF_Font_GetFontName(fnt))
        HPDF_Page_SetFontAndSize(page, fnt, 15)
        HPDF_Page_MoveTextPos(page, 10, -20)
        HPDF_Page_ShowText(page, "abcdefghijklmnopqrstuvwxyz")
        HPDF_Page_MoveTextPos(page, 0, -20)
        HPDF_Page_ShowText(page, "ABCDEFGHIJKLMNOPQRSTUVWXYZ")
        HPDF_Page_MoveTextPos(page, 0, -20)
        HPDF_Page_ShowText(page, "1234567890")
        HPDF_Page_MoveTextPos(page, 0, -20)
        # The sample text at increasing point sizes.
        HPDF_Page_SetFontAndSize(page, fnt, 10)
        HPDF_Page_ShowText(page, samp_text)
        HPDF_Page_MoveTextPos(page, 0, -18)
        HPDF_Page_SetFontAndSize(page, fnt, 16)
        HPDF_Page_ShowText(page, samp_text)
        HPDF_Page_MoveTextPos(page, 0, -27)
        HPDF_Page_SetFontAndSize(page, fnt, 23)
        HPDF_Page_ShowText(page, samp_text)
        HPDF_Page_MoveTextPos(page, 0, -36)
        HPDF_Page_SetFontAndSize(page, fnt, 30)
        HPDF_Page_ShowText(page, samp_text)
        p = HPDF_Page_GetCurrentTextPos(page)
        # finish to print text.
        HPDF_Page_EndText(page)
        # Tick marks every 30pt under the largest sample line.
        HPDF_Page_SetLineWidth(page, 0.5)
        x_pos = 20
        for _ in range(len(samp_text) // 2):
            HPDF_Page_MoveTo(page, x_pos, p.y - 10)
            HPDF_Page_LineTo(page, x_pos, p.y - 12)
            HPDF_Page_Stroke(page)
            x_pos = x_pos + 30
        # Size the page to fit the text that was actually drawn.
        HPDF_Page_SetWidth(page, p.x + 20)
        HPDF_Page_SetHeight(page, PAGE_HEIGHT)
        # Horizontal rules framing the samples.
        HPDF_Page_MoveTo(page, 10, PAGE_HEIGHT - 25)
        HPDF_Page_LineTo(page, p.x + 10, PAGE_HEIGHT - 25)
        HPDF_Page_Stroke(page)
        HPDF_Page_MoveTo(page, 10, PAGE_HEIGHT - 85)
        HPDF_Page_LineTo(page, p.x + 10, PAGE_HEIGHT - 85)
        HPDF_Page_Stroke(page)
        HPDF_Page_MoveTo(page, 10, p.y - 12)
        HPDF_Page_LineTo(page, p.x + 10, p.y - 12)
        HPDF_Page_Stroke(page)
    HPDF_SaveToFile(pdf, fname)
    # clean up
    HPDF_Free(pdf)
    return 0
main()
###
## * << Haru Free PDF Library 2.0.0 >> -- image_demo.c
## *
## * Copyright (c) 1999-2006 Takeshi Kanno <takeshi_kanno@est.hi-ho.ne.jp>
## *
## * Permission to use, copy, modify, distribute and sell this software
## * and its documentation for any purpose is hereby granted without fee,
## * provided that the above copyright notice appear in all copies and
## * that both that copyright notice and this permission notice appear
## * in supporting documentation.
## * It is provided "as is" without express or implied warranty.
## *
##
## port to python by Li Jun
## http://groups.google.com/group/pythoncia
import os, sys
from ctypes import *
up=2
def setlibpath(up):
    """Append the directory *up* levels above this file to sys.path.

    The original climbed directories with the literal string '\\..',
    which only works where the backslash is a path separator (Windows);
    os.path.join with os.pardir is portable and warning-free.
    """
    import sys
    base = os.path.dirname(os.path.realpath(__file__))
    path = os.path.normpath(os.path.join(base, *([os.pardir] * up)))
    if path not in sys.path:
        sys.path.append(path)
setlibpath(up)
from haru import *
from haru.c_func import *
from haru.hpdf_errorcode import *
from math import *
@HPDF_Error_Handler(None, HPDF_UINT, HPDF_UINT, c_void_p)
def error_handler (error_no, detail_no, user_data):
    """libharu error callback: print the error, free the document, exit.

    Installed via HPDF_New; the library calls it with numeric error and
    detail codes plus an opaque user pointer (unused here).  Exits the
    process with status 1 -- it never returns to the caller.
    """
    global pdf
    printf ("ERROR: %s, detail_no=%u\n", error_detail[error_no],
                detail_no)
    HPDF_Free (pdf)
    sys.exit(1)
def show_description (page, x, y, text):
    """Mark (x, y) with a small cross and print its coordinates and a caption."""
    # Cross-hair: one vertical and one horizontal tick through the point.
    HPDF_Page_MoveTo (page, x, y - 10)
    HPDF_Page_LineTo (page, x, y + 10)
    HPDF_Page_MoveTo (page, x - 10, y)
    HPDF_Page_LineTo (page, x + 10, y)
    HPDF_Page_Stroke (page)
    # Small black text, keeping whatever font the page already uses.
    HPDF_Page_SetFontAndSize (page, HPDF_Page_GetCurrentFont (page), 8)
    HPDF_Page_SetRGBFill (page, 0, 0, 0)
    coord_label = "(x=%d,y=%d)" % (int(x), int(y))
    # Coordinates, right-aligned so they end just left of the marker.
    HPDF_Page_BeginText (page)
    HPDF_Page_MoveTextPos (page,
                           x - HPDF_Page_TextWidth (page, coord_label) - 5,
                           y - 10)
    HPDF_Page_ShowText (page, coord_label)
    HPDF_Page_EndText (page)
    # Caption below the marker.
    HPDF_Page_BeginText (page)
    HPDF_Page_MoveTextPos (page, x - 20, y - 25)
    HPDF_Page_ShowText (page, text)
    HPDF_Page_EndText (page)
def main ():
    """Render image_demo.pdf: PNG images drawn at actual size, scaled,
    skewed, rotated, stencil-masked and colour-masked."""
    # NOTE(review): unlike the other demos, ``pdf`` is local here (no
    # ``global pdf``), so error_handler's ``global pdf`` refers to a
    # different, unset binding -- confirm the handler can free this doc.
    fname=os.path.realpath(sys.argv[0])
    fname=fname[:fname.rfind('.')]+'.pdf'
    pdf = HPDF_New (error_handler, NULL)
    if (not pdf):
        printf ("error: cannot create PdfDoc object\n")
        return 1
    HPDF_SetCompressionMode (pdf, HPDF_COMP_ALL)
    # create default-font
    font = HPDF_GetFont (pdf, "Helvetica", NULL)
    # add a new page object.
    page = HPDF_AddPage (pdf)
    HPDF_Page_SetWidth (page, 550)
    HPDF_Page_SetHeight (page, 500)
    # Open the document scrolled to the top of this page.
    dst = HPDF_Page_CreateDestination (page)
    HPDF_Destination_SetXYZ (dst, 0, HPDF_Page_GetHeight (page), 1)
    HPDF_SetOpenAction(pdf, dst)
    HPDF_Page_BeginText (page)
    HPDF_Page_SetFontAndSize (page, font, 20)
    HPDF_Page_MoveTextPos (page, 220, HPDF_Page_GetHeight (page) - 70)
    HPDF_Page_ShowText (page, "ImageDemo")
    HPDF_Page_EndText (page)
    # load image file.
    image = HPDF_LoadPngImageFromFile (pdf, "pngsuite/basn3p02.png")
    # image1 is masked by image2.
    image1 = HPDF_LoadPngImageFromFile (pdf, "pngsuite/basn3p02.png")
    # image2 is a mask image.
    image2 = HPDF_LoadPngImageFromFile (pdf, "pngsuite/basn0g01.png")
    # image3 is a RGB-color image. we use this image for color-mask demo.
    image3 = HPDF_LoadPngImageFromFile (pdf, "pngsuite/maskimage.png")
    iw = HPDF_Image_GetWidth (image)
    ih = HPDF_Image_GetHeight (image)
    HPDF_Page_SetLineWidth (page, 0.5)
    x = 100
    y = HPDF_Page_GetHeight (page) - 150
    # Draw image to the canvas. (normal-mode with actual size.)
    HPDF_Page_DrawImage (page, image, x, y, iw, ih)
    show_description (page, x, y, "Actual Size")
    x += 150
    # Scalling image (X direction)
    HPDF_Page_DrawImage (page, image, x, y, iw * 1.5, ih)
    show_description (page, x, y, "Scalling image (X direction)")
    x += 150
    # Scalling image (Y direction).
    HPDF_Page_DrawImage (page, image, x, y, iw, ih * 1.5)
    show_description (page, x, y, "Scalling image (Y direction)")
    x = 100
    y -= 120
    # Skewing image.
    angle1 = 10
    angle2 = 20
    # Degrees to radians with the C demo's hard-coded pi approximation.
    rad1 = angle1 / 180 * 3.141592
    rad2 = angle2 / 180 * 3.141592
    HPDF_Page_GSave (page)
    # Shear transform: the off-diagonal terms are the skew tangents.
    HPDF_Page_Concat (page, iw, tan(rad1) * iw, tan(rad2) * ih, ih, x, y)
    HPDF_Page_ExecuteXObject (page, image)
    HPDF_Page_GRestore (page)
    show_description (page, x, y, "Skewing image")
    x += 150
    # Rotating image
    angle = 30; # rotation of 30 degrees.
    rad = angle / 180 * 3.141592; # Calcurate the radian value.
    HPDF_Page_GSave (page)
    # Standard rotation matrix, scaled by the image dimensions.
    HPDF_Page_Concat (page, iw * cos(rad),
                iw * sin(rad),
                ih * -sin(rad),
                ih * cos(rad),
                x, y)
    HPDF_Page_ExecuteXObject (page, image)
    HPDF_Page_GRestore (page)
    show_description (page, x, y, "Rotating image")
    x += 150
    # draw masked image.
    # Set image2 to the mask image of image1
    HPDF_Image_SetMaskImage (image1, image2)
    HPDF_Page_SetRGBFill (page, 0, 0, 0)
    HPDF_Page_BeginText (page)
    HPDF_Page_MoveTextPos (page, x - 6, y + 14)
    HPDF_Page_ShowText (page, "MASKMASK")
    HPDF_Page_EndText (page)
    HPDF_Page_DrawImage (page, image1, x - 3, y - 3, iw + 6, ih + 6)
    show_description (page, x, y, "masked image")
    x = 100
    y -= 120
    # color mask.
    HPDF_Page_SetRGBFill (page, 0, 0, 0)
    HPDF_Page_BeginText (page)
    HPDF_Page_MoveTextPos (page, x - 6, y + 14)
    HPDF_Page_ShowText (page, "MASKMASK")
    HPDF_Page_EndText (page)
    HPDF_Image_SetColorMask (image3, 0, 255, 0, 0, 0, 255)
    HPDF_Page_DrawImage (page, image3, x, y, iw, ih)
    show_description (page, x, y, "Color Mask")
    # save the document to a file
    HPDF_SaveToFile (pdf, fname)
    # clean up
    HPDF_Free (pdf)
    return 0
if HPDF_NOPNGLIB:
printf("WARNING: if you want to run this demo, \n"
"make libhpdf with HPDF_USE_PNGLIB option.\n")
sys.exit(1)
else:
    main()
###
## * << Haru Free PDF Library 2.0.6 >> -- slideshow_demo.c
## *
## * Copyright (c) 1999-2006 Takeshi Kanno <takeshi_kanno@est.hi-ho.ne.jp>
## *
## * Permission to use, copy, modify, distribute and sell this software
## * and its documentation for any purpose is hereby granted without fee,
## * provided that the above copyright notice appear in all copies and
## * that both that copyright notice and this permission notice appear
## * in supporting documentation.
## * It is provided "as is" without express or implied warranty.
## *
##
## port to python by Li Jun
## http://groups.google.com/group/pythoncia
import os, sys
from ctypes import *
up=2
def setlibpath(up):
    """Append the directory *up* levels above this file to sys.path.

    The original climbed directories with the literal string '\\..',
    which only works where the backslash is a path separator (Windows);
    os.path.join with os.pardir is portable and warning-free.
    """
    import sys
    base = os.path.dirname(os.path.realpath(__file__))
    path = os.path.normpath(os.path.join(base, *([os.pardir] * up)))
    if path not in sys.path:
        sys.path.append(path)
setlibpath(up)
from haru import *
from haru.c_func import *
from haru.hpdf_errorcode import *
import random
@HPDF_Error_Handler(None, HPDF_UINT, HPDF_UINT, c_void_p)
def error_handler (error_no, detail_no, user_data):
    """libharu error callback: print the error, free the document, exit.

    Installed via HPDF_New; the library calls it with numeric error and
    detail codes plus an opaque user pointer (unused here).  Exits the
    process with status 1 -- it never returns to the caller.
    """
    global pdf
    printf ("ERROR: %s, detail_no=%u\n", error_detail[error_no],
                detail_no)
    HPDF_Free (pdf)
    sys.exit(1)
def print_page (page, caption, font, style, prev, next):
    """Fill one 800x600 slide: random background, caption, transition,
    and Prev/Next link annotations to the neighbouring pages."""
    red = random.random()
    green = random.random()
    blue = random.random()
    rect = HPDF_Rect()
    HPDF_Page_SetWidth (page, 800)
    HPDF_Page_SetHeight (page, 600)
    # Solid random background colour.
    HPDF_Page_SetRGBFill (page, red, green, blue)
    HPDF_Page_Rectangle (page, 0, 0, 800, 600)
    HPDF_Page_Fill (page)
    # Text in the complementary colour so it stays readable.
    HPDF_Page_SetRGBFill (page, 1.0 - red, 1.0 - green, 1.0 - blue)
    HPDF_Page_SetFontAndSize (page, font, 30)
    HPDF_Page_BeginText (page)
    # Caption in a slightly condensed (0.8 horizontal scale) matrix.
    HPDF_Page_SetTextMatrix (page, 0.8, 0.0, 0.0, 1.0, 0.0, 0.0)
    HPDF_Page_TextOut (page, 50, 530, caption)
    HPDF_Page_SetTextMatrix (page, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0)
    HPDF_Page_SetFontAndSize (page, font, 20)
    HPDF_Page_TextOut (page, 55, 300,
            "Type \"Ctrl+L\" in order to return from full screen mode.")
    HPDF_Page_EndText (page)
    # 5 second display, 1 second transition.
    HPDF_Page_SetSlideShow (page, style, 5.0, 1.0)
    HPDF_Page_SetFontAndSize (page, font, 20)

    def link_button(label, text_x, left, right, target):
        # Draw the label, then lay an invisible link rectangle over it
        # that jumps to *target*.
        HPDF_Page_BeginText (page)
        HPDF_Page_TextOut (page, text_x, 50, label)
        HPDF_Page_EndText (page)
        rect.left = left
        rect.right = right
        rect.top = 70
        rect.bottom = 50
        dst = HPDF_Page_CreateDestination (target)
        HPDF_Destination_SetFit (dst)
        annot = HPDF_Page_CreateLinkAnnot (page, rect, dst)
        HPDF_LinkAnnot_SetBorderStyle (annot, 0, 0, 0)
        HPDF_LinkAnnot_SetHighlightMode (annot, HPDF_ANNOT_INVERT_BOX)

    if next:
        link_button("Next=>", 680, 680, 750, next)
    if prev:
        link_button("<=Prev", 50, 50, 110, prev)
def main():
    """Build slideshow_demo.pdf: 17 full-screen pages, one per page
    transition style, chained together with Prev/Next links.

    Returns 0 on success, 1 when the document cannot be created.
    """
    global pdf
    fname = os.path.realpath(sys.argv[0])
    fname = fname[:fname.rfind('.')] + '.pdf'
    pdf = HPDF_New(error_handler, NULL)
    if not pdf:
        printf("error: cannot create PdfDoc object\n")
        return 1
    # create default-font
    font = HPDF_GetFont(pdf, "Courier", NULL)
    # One demo page per slide-show transition style, in presentation
    # order; the caption shown on each page is the constant's name.
    transitions = [
        ("HPDF_TS_WIPE_RIGHT", HPDF_TS_WIPE_RIGHT),
        ("HPDF_TS_WIPE_UP", HPDF_TS_WIPE_UP),
        ("HPDF_TS_WIPE_LEFT", HPDF_TS_WIPE_LEFT),
        ("HPDF_TS_WIPE_DOWN", HPDF_TS_WIPE_DOWN),
        ("HPDF_TS_BARN_DOORS_HORIZONTAL_OUT", HPDF_TS_BARN_DOORS_HORIZONTAL_OUT),
        ("HPDF_TS_BARN_DOORS_HORIZONTAL_IN", HPDF_TS_BARN_DOORS_HORIZONTAL_IN),
        ("HPDF_TS_BARN_DOORS_VERTICAL_OUT", HPDF_TS_BARN_DOORS_VERTICAL_OUT),
        ("HPDF_TS_BARN_DOORS_VERTICAL_IN", HPDF_TS_BARN_DOORS_VERTICAL_IN),
        ("HPDF_TS_BOX_OUT", HPDF_TS_BOX_OUT),
        ("HPDF_TS_BOX_IN", HPDF_TS_BOX_IN),
        ("HPDF_TS_BLINDS_HORIZONTAL", HPDF_TS_BLINDS_HORIZONTAL),
        ("HPDF_TS_BLINDS_VERTICAL", HPDF_TS_BLINDS_VERTICAL),
        ("HPDF_TS_DISSOLVE", HPDF_TS_DISSOLVE),
        ("HPDF_TS_GLITTER_RIGHT", HPDF_TS_GLITTER_RIGHT),
        ("HPDF_TS_GLITTER_DOWN", HPDF_TS_GLITTER_DOWN),
        ("HPDF_TS_GLITTER_TOP_LEFT_TO_BOTTOM_RIGHT",
         HPDF_TS_GLITTER_TOP_LEFT_TO_BOTTOM_RIGHT),
        ("HPDF_TS_REPLACE", HPDF_TS_REPLACE),
    ]
    # Add one page object per transition (the original spelled out 17
    # identical HPDF_AddPage lines).
    page = [HPDF_AddPage(pdf) for _ in transitions]
    # Link every page to its neighbours; the first has no Prev and the
    # last no Next (NULL suppresses the corresponding link button).
    last = len(transitions) - 1
    for i, (caption, style) in enumerate(transitions):
        prev_page = page[i - 1] if i > 0 else NULL
        next_page = page[i + 1] if i < last else NULL
        print_page(page[i], caption, font, style, prev_page, next_page)
    HPDF_SetPageMode(pdf, HPDF_PAGE_MODE_FULL_SCREEN)
    # save the document to a file
    HPDF_SaveToFile(pdf, fname)
    # clean up
    HPDF_Free(pdf)
    return 0
main()
#coding=utf-8
###
## * << Haru Free PDF Library 2.0.0 >> -- text_annotation.c
## *
## * Copyright (c) 1999-2006 Takeshi Kanno <takeshi_kanno@est.hi-ho.ne.jp>
## *
## * Permission to use, copy, modify, distribute and sell this software
## * and its documentation for any purpose is hereby granted without fee,
## * provided that the above copyright notice appear in all copies and
## * that both that copyright notice and this permission notice appear
## * in supporting documentation.
## * It is provided "as is" without express or implied warranty.
## *
##
## port to python by Li Jun
## http://groups.google.com/group/pythoncia
import os, sys
from ctypes import *
up=2
def setlibpath(up):
    """Append the directory *up* levels above this file to sys.path.

    The original climbed directories with the literal string '\\..',
    which only works where the backslash is a path separator (Windows);
    os.path.join with os.pardir is portable and warning-free.
    """
    import sys
    base = os.path.dirname(os.path.realpath(__file__))
    path = os.path.normpath(os.path.join(base, *([os.pardir] * up)))
    if path not in sys.path:
        sys.path.append(path)
setlibpath(up)
from haru import *
from haru.c_func import *
from haru.hpdf_errorcode import *
@HPDF_Error_Handler(None, HPDF_UINT, HPDF_UINT, c_void_p)
def error_handler (error_no, detail_no, user_data):
    """libharu error callback: print the error, free the document, exit.

    Installed via HPDF_New; the library calls it with numeric error and
    detail codes plus an opaque user pointer (unused here).  Exits the
    process with status 1 -- it never returns to the caller.
    """
    global pdf
    printf ("ERROR: %s, detail_no=%u\n", error_detail[error_no],
                detail_no)
    HPDF_Free (pdf)
    sys.exit(1)
def main():
    """Render text_annotation.pdf: the text-annotation icon styles
    (comment, key, note, help, new-paragraph, paragraph, insert), each
    in its own rectangle with a caption underneath.

    Returns 0 on success, 1 when the document cannot be created.
    """
    # Annotation rectangles: two columns, four rows (left, bottom, right, top).
    rect1 = HPDF_Rect(50, 350, 150, 400)
    rect2 = HPDF_Rect(210, 350, 350, 400)
    rect3 = HPDF_Rect(50, 250, 150, 300)
    rect4 = HPDF_Rect(210, 250, 350, 300)
    rect5 = HPDF_Rect(50, 150, 150, 200)
    rect6 = HPDF_Rect(210, 150, 350, 200)
    rect7 = HPDF_Rect(50, 50, 150, 100)
    rect8 = HPDF_Rect(210, 50, 350, 100)
    global pdf
    fname = os.path.realpath(sys.argv[0])
    fname = fname[:fname.rfind('.')] + '.pdf'
    pdf = HPDF_New(error_handler, NULL)
    if not pdf:
        printf("error: cannot create PdfDoc object\n")
        return 1
    # use Times-Roman font.
    font = HPDF_GetFont(pdf, "Times-Roman", "WinAnsiEncoding")
    page = HPDF_AddPage(pdf)
    HPDF_Page_SetWidth(page, 400)
    HPDF_Page_SetHeight(page, 500)
    # Page title.
    HPDF_Page_BeginText(page)
    HPDF_Page_SetFontAndSize(page, font, 16)
    HPDF_Page_MoveTextPos(page, 130, 450)
    HPDF_Page_ShowText(page, "Annotation Demo")
    HPDF_Page_EndText(page)
    # One annotation per icon style; only the first starts opened.
    annot = HPDF_Page_CreateTextAnnot(page, rect1, "Annotation with Comment "
                "Icon. \n This annotation set to be opened initially.",
                NULL)
    HPDF_TextAnnot_SetIcon(annot, HPDF_ANNOT_ICON_COMMENT)
    HPDF_TextAnnot_SetOpened(annot, HPDF_TRUE)
    annot = HPDF_Page_CreateTextAnnot(page, rect2,
                "Annotation with Key Icon", NULL)
    # Fixed: the port set HPDF_ANNOT_ICON_PARAGRAPH here despite the
    # "Key Icon" text (the paragraph icon belongs to rect6 below).
    HPDF_TextAnnot_SetIcon(annot, HPDF_ANNOT_ICON_KEY)
    annot = HPDF_Page_CreateTextAnnot(page, rect3,
                "Annotation with Note Icon", NULL)
    HPDF_TextAnnot_SetIcon(annot, HPDF_ANNOT_ICON_NOTE)
    annot = HPDF_Page_CreateTextAnnot(page, rect4,
                "Annotation with Help Icon", NULL)
    HPDF_TextAnnot_SetIcon(annot, HPDF_ANNOT_ICON_HELP)
    annot = HPDF_Page_CreateTextAnnot(page, rect5,
                "Annotation with NewParagraph Icon", NULL)
    HPDF_TextAnnot_SetIcon(annot, HPDF_ANNOT_ICON_NEW_PARAGRAPH)
    annot = HPDF_Page_CreateTextAnnot(page, rect6,
                "Annotation with Paragraph Icon", NULL)
    HPDF_TextAnnot_SetIcon(annot, HPDF_ANNOT_ICON_PARAGRAPH)
    annot = HPDF_Page_CreateTextAnnot(page, rect7,
                "Annotation with Insert Icon", NULL)
    HPDF_TextAnnot_SetIcon(annot, HPDF_ANNOT_ICON_INSERT)
    encoding = HPDF_GetEncoder(pdf, "ISO8859-2")
    # The eighth (ISO8859-2) annotation from the C demo stays disabled:
    # its sample text was garbled during the port.
    #HPDF_Page_CreateTextAnnot (page, rect8,
    #        "Annotation with ISO8859 text ...", encoding)
    # Captions under each rectangle (the original repeated this
    # BeginText/MoveTextPos/ShowText/EndText block eight times).
    HPDF_Page_SetFontAndSize(page, font, 11)
    captions = [
        (rect1, "Comment Icon."),
        (rect2, "Key Icon"),
        (rect3, "Note Icon."),
        (rect4, "Help Icon"),
        (rect5, "NewParagraph Icon"),
        (rect6, "Paragraph Icon"),
        (rect7, "Insert Icon"),
        (rect8, "Text Icon(ISO8859-2 text)"),
    ]
    for rect, label in captions:
        HPDF_Page_BeginText(page)
        HPDF_Page_MoveTextPos(page, rect.left + 35, rect.top - 20)
        HPDF_Page_ShowText(page, label)
        HPDF_Page_EndText(page)
    # save the document to a file
    HPDF_SaveToFile(pdf, fname)
    # clean up
    HPDF_Free(pdf)
    return 0
main()
###
## * << Haru Free PDF Library 2.0.0 >> -- character_map.c
## *
## * Copyright (c) 1999-2006 Takeshi Kanno <takeshi_kanno@est.hi-ho.ne.jp>
## *
## * Permission to use, copy, modify, distribute and sell this software
## * and its documentation for any purpose is hereby granted without fee,
## * provided that the above copyright notice appear in all copies and
## * that both that copyright notice and this permission notice appear
## * in supporting documentation.
## * It is provided "as is" without express or implied warranty.
## *
## * usage character_map <encoding-name> <low-range-from> <low-range-to>
## * <high-range-from> <high-range-to>
## * ex. character_map 90ms-RKSJ-V 0x80 0x
## *
##
## port to python by Li Jun
## http://groups.google.com/group/pythoncia
import os, sys
from ctypes import *
up=2
def setlibpath(up):
    """Append the directory *up* levels above this file to sys.path.

    The original climbed directories with the literal string '\\..',
    which only works where the backslash is a path separator (Windows);
    os.path.join with os.pardir is portable and warning-free.
    """
    import sys
    base = os.path.dirname(os.path.realpath(__file__))
    path = os.path.normpath(os.path.join(base, *([os.pardir] * up)))
    if path not in sys.path:
        sys.path.append(path)
setlibpath(up)
from haru import *
from haru.c_func import *
from haru.hpdf_errorcode import *
@HPDF_Error_Handler(None, HPDF_UINT, HPDF_UINT, c_void_p)
def error_handler (error_no, detail_no, user_data):
    """libharu error callback: print the error, free the document, exit.

    Installed via HPDF_New; the library calls it with numeric error and
    detail codes plus an opaque user pointer (unused here).  Exits the
    process with status 1 -- it never returns to the caller.
    """
    global pdf
    printf ("ERROR: %s, detail_no=%u\n", error_detail[error_no],
                detail_no)
    HPDF_Free (pdf)
    sys.exit(1)
def draw_page(pdf, page, title_font, font, h_byte, l_byte):
    """Draw one code-chart page: a 16-wide grid of the double-byte codes
    whose lead byte is *h_byte*, starting from trail byte *l_byte*.

    Row labels show the high nibble of the trail byte, column labels the
    low nibble; each populated cell shows one two-byte character.
    """
    PAGE_WIDTH = 420
    CELL_HEIGHT = 20
    CELL_WIDTH = 20
    # Round the first trail byte down to a 16-aligned row boundary.
    # Floor (//) division matters: the Python 2 port's "/" yields floats
    # under Python 3 and makes every derived coordinate a float.
    l_byte = (l_byte // 16) * 16
    h_count = 16 - (l_byte // 16)
    page_height = 40 + 40 + (h_count + 1) * CELL_HEIGHT
    HPDF_Page_SetHeight(page, page_height)
    HPDF_Page_SetWidth(page, PAGE_WIDTH)
    HPDF_Page_SetFontAndSize(page, title_font, 10)
    # Horizontal grid lines plus the hex row labels; the original's
    # while/True countdown is an inclusive range(h_count+1 .. 0).
    for ypos in range(h_count + 1, -1, -1):
        y = ypos * CELL_HEIGHT + 40
        HPDF_Page_MoveTo(page, 40, y)
        HPDF_Page_LineTo(page, 380, y)
        HPDF_Page_Stroke(page)
        if ypos < h_count:
            # Single hex digit + NUL, passed as unsigned-char values.
            buf = [None, None]
            buf[0] = 16 - ypos - 1
            if buf[0] < 10:
                buf[0] += ord('0')
            else:
                buf[0] += (ord('A') - 10)
            buf[1] = 0
            buf = [i % 256 for i in buf]  # because buf is unsigned char *
            w = HPDF_Page_TextWidth(page, buf)
            HPDF_Page_BeginText(page)
            HPDF_Page_MoveTextPos(page, 40 + (20 - w) / 2, y + 5)
            HPDF_Page_ShowText(page, buf)
            HPDF_Page_EndText(page)
    # Vertical grid lines plus the hex column labels.
    for xpos in range(18):
        y = (h_count + 1) * CELL_HEIGHT + 40
        x = xpos * CELL_WIDTH + 40
        HPDF_Page_MoveTo(page, x, 40)
        HPDF_Page_LineTo(page, x, y)
        HPDF_Page_Stroke(page)
        if 0 < xpos <= 16:
            buf = [None, None]
            buf[0] = xpos - 1
            if buf[0] < 10:
                buf[0] += ord('0')
            else:
                buf[0] += (ord('A') - 10)
            buf[1] = 0
            buf = [i % 256 for i in buf]  # because buf is unsigned char *
            w = HPDF_Page_TextWidth(page, buf)
            HPDF_Page_BeginText(page)
            HPDF_Page_MoveTextPos(page, x + (20 - w) / 2,
                                  h_count * CELL_HEIGHT + 45)
            HPDF_Page_ShowText(page, buf)
            HPDF_Page_EndText(page)
    # The characters themselves, one cell per trail byte.
    HPDF_Page_SetFontAndSize(page, font, 15)
    for ypos in range(h_count, -1, -1):
        y = (ypos - 1) * CELL_HEIGHT + 45
        for xpos in range(16):
            x = xpos * CELL_WIDTH + 40 + CELL_WIDTH
            # Lead byte, trail byte, NUL terminator -- as unsigned chars.
            buf = [h_byte, (16 - ypos) * 16 + xpos, 0x00]
            buf = [i % 256 for i in buf]  # because buf is unsigned char *
            w = HPDF_Page_TextWidth(page, buf)
            # Zero width means the code maps to no glyph; skip the cell.
            if w > 0:
                HPDF_Page_BeginText(page)
                HPDF_Page_MoveTextPos(page, x + (20 - w) / 2, y)
                HPDF_Page_ShowText(page, buf)
                HPDF_Page_EndText(page)
def main ():
    """Generate a character-map PDF for a double-byte CMap encoding.

    usage: character_map <encoding-name> <font-name>

    Scans all 2-byte codes for lead bytes that map to real glyphs, then
    emits one chart page (with an outline entry) per populated lead byte.

    Returns 0 on success, 1 on bad arguments or libharu failure.
    """
    global pdf
    # Per-lead-byte "page needed" flags.  Plain ints instead of the
    # original's HPDF_UINT16(0) ctypes objects: the flags are only ever
    # tested for truth and overwritten with the int 1, so ints avoid
    # relying on ctypes truth-testing semantics entirely.
    flg = [0] * 256
    fname = os.path.realpath(sys.argv[0])
    fname = fname[:fname.rfind('.')] + '.pdf'
    if len(sys.argv) < 3:
        printf("usage: character_map <encoding-name> <font-name>\n")
        printf('for example, character_map.py GBK-EUC-H SimHei,Bold')
        return 1
    pdf = HPDF_New(error_handler, NULL)
    if not pdf:
        printf("error: cannot create PdfDoc object\n")
        return 1
    # configure pdf-document (showing outline, compression enabled)
    HPDF_SetPageMode(pdf, HPDF_PAGE_MODE_USE_OUTLINE)
    HPDF_SetCompressionMode(pdf, HPDF_COMP_ALL)
    HPDF_SetPagesConfiguration(pdf, 10)
    # Register every CJK encoding/font family so any CMap name works.
    HPDF_UseJPEncodings(pdf)
    HPDF_UseJPFonts(pdf)
    HPDF_UseKREncodings(pdf)
    HPDF_UseKRFonts(pdf)
    HPDF_UseCNSEncodings(pdf)
    HPDF_UseCNSFonts(pdf)
    HPDF_UseCNTEncodings(pdf)
    HPDF_UseCNTFonts(pdf)
    encoder = HPDF_GetEncoder(pdf, sys.argv[1])
    if HPDF_Encoder_GetType(encoder) != HPDF_ENCODER_TYPE_DOUBLE_BYTE:
        printf("error: %s is not cmap-encoder\n", sys.argv[1])
        HPDF_Free(pdf)
        return 1
    font = HPDF_GetFont(pdf, sys.argv[2], sys.argv[1])
    min_l = 255
    min_h = 256
    max_l = 0
    max_h = 0
    # Probe every lead/trail byte pair for a mapped character.
    # NOTE(review): range(20, 256) looks like a mistranscription of the
    # C demo's 0x20 (= 32) lower bound -- confirm against character_map.c
    # before changing.
    for i in range(256):
        for j in range(20, 256):
            code = i * 256 + j
            buf = [i, j, 0]
            btype = HPDF_Encoder_GetByteType(encoder, buf, 0)
            unicode = HPDF_Encoder_GetUnicode(encoder, code)
            # 0x25A1 (white square) is the "unmapped" placeholder glyph.
            if btype == HPDF_BYTE_TYPE_LEAD and unicode != 0x25A1:
                min_l = min(min_l, j)
                max_l = max(max_l, j)
                min_h = min(min_h, i)
                max_h = max(max_h, i)
                flg[i] = 1
    printf("min_h=%04X max_h=%04X min_l=%04X max_l=%04X\n",
           min_h, max_h, min_l, max_l)
    # create outline root.
    root = HPDF_CreateOutline(pdf, NULL, sys.argv[1], NULL)
    HPDF_Outline_SetOpened(root, HPDF_TRUE)
    for i in range(256):
        if flg[i]:
            page = HPDF_AddPage(pdf)
            title_font = HPDF_GetFont(pdf, "Helvetica", NULL)
            buf = "0x%04X-0x%04X" % (i * 256 + min_l, i * 256 + max_l)
            outline = HPDF_CreateOutline(pdf, root, buf, NULL)
            dst = HPDF_Page_CreateDestination(page)
            HPDF_Outline_SetDestination(outline, dst)
            draw_page(pdf, page, title_font, font, i, min_l)
            buf = "%s (%s) 0x%04X-0x%04X" % (
                sys.argv[1], sys.argv[2], i * 256 + min_l, i * 256 + max_l)
            HPDF_Page_SetFontAndSize(page, title_font, 10)
            HPDF_Page_BeginText(page)
            HPDF_Page_MoveTextPos(page, 40, HPDF_Page_GetHeight(page) - 35)
            HPDF_Page_ShowText(page, buf)
            HPDF_Page_EndText(page)
    HPDF_SaveToFile(pdf, fname)
    HPDF_Free(pdf)
    return 0
main()
###
## * << Haru Free PDF Library 2.0.0 >> -- line_demo.c
## *
## * Copyright (c) 1999-2006 Takeshi Kanno <takeshi_kanno@est.hi-ho.ne.jp>
## *
## * Permission to use, copy, modify, distribute and sell this software
## * and its documentation for any purpose is hereby granted without fee,
## * provided that the above copyright notice appear in all copies and
## * that both that copyright notice and this permission notice appear
## * in supporting documentation.
## * It is provided "as is" without express or implied warranty.
## *
##
## port to python by Li Jun
## http://groups.google.com/group/pythoncia
import os, sys
from ctypes import *
up=2
def setlibpath(up):
    """Append the directory *up* levels above this file to sys.path.

    The original climbed directories with the literal string '\\..',
    which only works where the backslash is a path separator (Windows);
    os.path.join with os.pardir is portable and warning-free.
    """
    import sys
    base = os.path.dirname(os.path.realpath(__file__))
    path = os.path.normpath(os.path.join(base, *([os.pardir] * up)))
    if path not in sys.path:
        sys.path.append(path)
setlibpath(up)
from haru import *
from haru.c_func import *
from haru.hpdf_errorcode import *
@HPDF_Error_Handler(None, HPDF_UINT, HPDF_UINT, c_void_p)
def error_handler (error_no, detail_no, user_data):
    """libharu error callback: print the error, free the document, exit.

    Installed via HPDF_New; the library calls it with numeric error and
    detail codes plus an opaque user pointer (unused here).  Exits the
    process with status 1 -- it never returns to the caller.
    """
    global pdf
    printf ("ERROR: %s, detail_no=%u\n", error_detail[error_no],
                detail_no)
    HPDF_Free (pdf)
    sys.exit(1)
def draw_line (page, x, y, label):
    """Print *label* at (x, y-10) and stroke a 220pt rule just beneath it."""
    HPDF_Page_BeginText (page)
    HPDF_Page_MoveTextPos (page, x, y - 10)
    HPDF_Page_ShowText (page, label)
    HPDF_Page_EndText (page)
    # The sample line itself, 5pt below the label baseline.
    rule_y = y - 15
    HPDF_Page_MoveTo (page, x, rule_y)
    HPDF_Page_LineTo (page, x + 220, rule_y)
    HPDF_Page_Stroke (page)
def draw_line2 (page, x, y, label):
    """Print *label* at (x, y) and stroke a short, inset rule below it.

    Used by the cap-style samples, which need room around the line ends.
    """
    HPDF_Page_BeginText (page)
    HPDF_Page_MoveTextPos (page, x, y)
    HPDF_Page_ShowText (page, label)
    HPDF_Page_EndText (page)
    # Rule inset 30pt from the left and drawn 25pt below the label.
    rule_y = y - 25
    HPDF_Page_MoveTo (page, x + 30, rule_y)
    HPDF_Page_LineTo (page, x + 160, rule_y)
    HPDF_Page_Stroke (page)
def draw_rect (page, x, y, label):
    """Print *label* and construct (but do not paint) a rectangle path.

    The caller chooses the painting operator afterwards: Stroke, Fill,
    FillStroke or Clip.
    """
    HPDF_Page_BeginText (page)
    HPDF_Page_MoveTextPos (page, x, y - 10)
    HPDF_Page_ShowText (page, label)
    HPDF_Page_EndText (page)
    rect_w, rect_h = 220, 25
    HPDF_Page_Rectangle(page, x, y - 40, rect_w, rect_h)
def main ():
    """Render line_demo.pdf: line widths, dash patterns, cap and join
    styles, rectangle painting modes, clipping and Bezier curve forms."""
    global pdf
    page_title = "Line Example"
    # Dash patterns for HPDF_Page_SetDash (lengths of on/off runs).
    DASH_MODE1= [3]
    DASH_MODE2= [3, 7]
    DASH_MODE3= [8, 7, 2, 7]
    fname=os.path.realpath(sys.argv[0])
    fname=fname[:fname.rfind('.')]+'.pdf'
    pdf = HPDF_New (error_handler, NULL)
    if (not pdf):
        printf ("error: cannot create PdfDoc object\n")
        return 1
    # create default-font
    font = HPDF_GetFont (pdf, "Helvetica", NULL)
    # add a new page object.
    page = HPDF_AddPage (pdf)
    # print the lines of the page.
    HPDF_Page_SetLineWidth (page, 1)
    HPDF_Page_Rectangle (page, 50, 50, HPDF_Page_GetWidth(page) - 100,
                HPDF_Page_GetHeight (page) - 110)
    HPDF_Page_Stroke (page)
    # print the title of the page (with positioning center).
    HPDF_Page_SetFontAndSize (page, font, 24)
    tw = HPDF_Page_TextWidth (page, page_title)
    HPDF_Page_BeginText (page)
    HPDF_Page_MoveTextPos (page, (HPDF_Page_GetWidth(page) - tw) / 2,
                HPDF_Page_GetHeight (page) - 50)
    HPDF_Page_ShowText (page, page_title)
    HPDF_Page_EndText (page)
    HPDF_Page_SetFontAndSize (page, font, 10)
    # Draw verious widths of lines.
    HPDF_Page_SetLineWidth (page, 0)
    draw_line (page, 60, 770, "line width = 0")
    HPDF_Page_SetLineWidth (page, 1.0)
    draw_line (page, 60, 740, "line width = 1.0")
    HPDF_Page_SetLineWidth (page, 2.0)
    draw_line (page, 60, 710, "line width = 2.0")
    # Line dash pattern
    # NOTE(review): the labels below do not match the patterns actually
    # set (e.g. DASH_MODE2 is [3, 7] but the label says [7, 3]); the same
    # mismatch appears in the original C demo -- confirm before changing.
    HPDF_Page_SetLineWidth (page, 1.0)
    HPDF_Page_SetDash (page, DASH_MODE1, 1, 1)
    draw_line (page, 60, 680, "dash_ptn=[3], phase=1 -- "
                "2 on, 3 off, 3 on...")
    HPDF_Page_SetDash (page, DASH_MODE2, 2, 2)
    draw_line (page, 60, 650, "dash_ptn=[7, 3], phase=2 -- "
                "5 on 3 off, 7 on,...")
    HPDF_Page_SetDash (page, DASH_MODE3, 4, 0)
    draw_line (page, 60, 620, "dash_ptn=[8, 7, 2, 7], phase=0")
    # Reset dashes; thick green strokes for the cap-style samples.
    HPDF_Page_SetDash (page, NULL, 0, 0)
    HPDF_Page_SetLineWidth (page, 30)
    HPDF_Page_SetRGBStroke (page, 0.0, 0.5, 0.0)
    # Line Cap Style
    HPDF_Page_SetLineCap (page, HPDF_BUTT_END)
    draw_line2 (page, 60, 570, "PDF_BUTT_END")
    HPDF_Page_SetLineCap (page, HPDF_ROUND_END)
    draw_line2 (page, 60, 505, "PDF_ROUND_END")
    HPDF_Page_SetLineCap (page, HPDF_PROJECTING_SCUARE_END)
    draw_line2 (page, 60, 440, "PDF_PROJECTING_SCUARE_END")
    # Line Join Style: three chevrons, one per join style.
    HPDF_Page_SetLineWidth (page, 30)
    HPDF_Page_SetRGBStroke (page, 0.0, 0.0, 0.5)
    HPDF_Page_SetLineJoin (page, HPDF_MITER_JOIN)
    HPDF_Page_MoveTo (page, 120, 300)
    HPDF_Page_LineTo (page, 160, 340)
    HPDF_Page_LineTo (page, 200, 300)
    HPDF_Page_Stroke (page)
    HPDF_Page_BeginText (page)
    HPDF_Page_MoveTextPos (page, 60, 360)
    HPDF_Page_ShowText (page, "PDF_MITER_JOIN")
    HPDF_Page_EndText (page)
    HPDF_Page_SetLineJoin (page, HPDF_ROUND_JOIN)
    HPDF_Page_MoveTo (page, 120, 195)
    HPDF_Page_LineTo (page, 160, 235)
    HPDF_Page_LineTo (page, 200, 195)
    HPDF_Page_Stroke (page)
    HPDF_Page_BeginText (page)
    HPDF_Page_MoveTextPos (page, 60, 255)
    HPDF_Page_ShowText (page, "PDF_ROUND_JOIN")
    HPDF_Page_EndText (page)
    HPDF_Page_SetLineJoin (page, HPDF_BEVEL_JOIN)
    HPDF_Page_MoveTo (page, 120, 90)
    HPDF_Page_LineTo (page, 160, 130)
    HPDF_Page_LineTo (page, 200, 90)
    HPDF_Page_Stroke (page)
    HPDF_Page_BeginText (page)
    HPDF_Page_MoveTextPos (page, 60, 150)
    HPDF_Page_ShowText (page, "PDF_BEVEL_JOIN")
    HPDF_Page_EndText (page)
    # Draw Rectangle: draw_rect only builds the path; each call below
    # paints it with a different operator.
    HPDF_Page_SetLineWidth (page, 2)
    HPDF_Page_SetRGBStroke (page, 0, 0, 0)
    HPDF_Page_SetRGBFill (page, 0.75, 0.0, 0.0)
    draw_rect (page, 300, 770, "Stroke")
    HPDF_Page_Stroke (page)
    draw_rect (page, 300, 720, "Fill")
    HPDF_Page_Fill (page)
    draw_rect (page, 300, 670, "Fill then Stroke")
    HPDF_Page_FillStroke (page)
    # Clip Rect
    HPDF_Page_GSave (page); # Save the current graphic state
    draw_rect (page, 300, 620, "Clip Rectangle")
    HPDF_Page_Clip (page)
    HPDF_Page_Stroke (page)
    HPDF_Page_SetFontAndSize (page, font, 13)
    HPDF_Page_BeginText (page)
    HPDF_Page_MoveTextPos (page, 290, 600)
    HPDF_Page_SetTextLeading (page, 12)
    HPDF_Page_ShowText (page,
                "Clip Clip Clip Clip Clip Clipi Clip Clip Clip")
    HPDF_Page_ShowTextNextLine (page,
                "Clip Clip Clip Clip Clip Clip Clip Clip Clip")
    HPDF_Page_ShowTextNextLine (page,
                "Clip Clip Clip Clip Clip Clip Clip Clip Clip")
    HPDF_Page_EndText (page)
    HPDF_Page_GRestore (page)
    # Curve Example(CurveTo2)
    # Start/control points shared (and shifted) by all three curve demos.
    x = 330
    y = 440
    x1 = 430
    y1 = 530
    x2 = 480
    y2 = 470
    x3 = 480
    y3 = 90
    HPDF_Page_SetRGBFill (page, 0, 0, 0)
    HPDF_Page_BeginText (page)
    HPDF_Page_MoveTextPos (page, 300, 540)
    HPDF_Page_ShowText (page, "CurveTo2(x1, y1, x2. y2)")
    HPDF_Page_EndText (page)
    HPDF_Page_BeginText (page)
    HPDF_Page_MoveTextPos (page, x + 5, y - 5)
    HPDF_Page_ShowText (page, "Current point")
    HPDF_Page_MoveTextPos (page, x1 - x, y1 - y)
    HPDF_Page_ShowText (page, "(x1, y1)")
    HPDF_Page_MoveTextPos (page, x2 - x1, y2 - y1)
    HPDF_Page_ShowText (page, "(x2, y2)")
    HPDF_Page_EndText (page)
    # Dashed helper line between the control points, then the curve.
    HPDF_Page_SetDash (page, DASH_MODE1, 1, 0)
    HPDF_Page_SetLineWidth (page, 0.5)
    HPDF_Page_MoveTo (page, x1, y1)
    HPDF_Page_LineTo (page, x2, y2)
    HPDF_Page_Stroke (page)
    HPDF_Page_SetDash (page, NULL, 0, 0)
    HPDF_Page_SetLineWidth (page, 1.5)
    HPDF_Page_MoveTo (page, x, y)
    HPDF_Page_CurveTo2 (page, x1, y1, x2, y2)
    HPDF_Page_Stroke (page)
    # Curve Example(CurveTo3)
    y -= 150
    y1 -= 150
    y2 -= 150
    HPDF_Page_BeginText (page)
    HPDF_Page_MoveTextPos (page, 300, 390)
    HPDF_Page_ShowText (page, "CurveTo3(x1, y1, x2. y2)")
    HPDF_Page_EndText (page)
    HPDF_Page_BeginText (page)
    HPDF_Page_MoveTextPos (page, x + 5, y - 5)
    HPDF_Page_ShowText (page, "Current point")
    HPDF_Page_MoveTextPos (page, x1 - x, y1 - y)
    HPDF_Page_ShowText (page, "(x1, y1)")
    HPDF_Page_MoveTextPos (page, x2 - x1, y2 - y1)
    HPDF_Page_ShowText (page, "(x2, y2)")
    HPDF_Page_EndText (page)
    HPDF_Page_SetDash (page, DASH_MODE1, 1, 0)
    HPDF_Page_SetLineWidth (page, 0.5)
    HPDF_Page_MoveTo (page, x, y)
    HPDF_Page_LineTo (page, x1, y1)
    HPDF_Page_Stroke (page)
    HPDF_Page_SetDash (page, NULL, 0, 0)
    HPDF_Page_SetLineWidth (page, 1.5)
    HPDF_Page_MoveTo (page, x, y)
    HPDF_Page_CurveTo3 (page, x1, y1, x2, y2)
    HPDF_Page_Stroke (page)
    # Curve Example(CurveTo)
    y -= 150
    y1 -= 160
    y2 -= 130
    x2 += 10
    HPDF_Page_BeginText (page)
    HPDF_Page_MoveTextPos (page, 300, 240)
    HPDF_Page_ShowText (page, "CurveTo(x1, y1, x2. y2, x3, y3)")
    HPDF_Page_EndText (page)
    HPDF_Page_BeginText (page)
    HPDF_Page_MoveTextPos (page, x + 5, y - 5)
    HPDF_Page_ShowText (page, "Current point")
    HPDF_Page_MoveTextPos (page, x1 - x, y1 - y)
    HPDF_Page_ShowText (page, "(x1, y1)")
    HPDF_Page_MoveTextPos (page, x2 - x1, y2 - y1)
    HPDF_Page_ShowText (page, "(x2, y2)")
    HPDF_Page_MoveTextPos (page, x3 - x2, y3 - y2)
    HPDF_Page_ShowText (page, "(x3, y3)")
    HPDF_Page_EndText (page)
    HPDF_Page_SetDash (page, DASH_MODE1, 1, 0)
    HPDF_Page_SetLineWidth (page, 0.5)
    HPDF_Page_MoveTo (page, x, y)
    HPDF_Page_LineTo (page, x1, y1)
    HPDF_Page_Stroke (page)
    HPDF_Page_MoveTo (page, x2, y2)
    HPDF_Page_LineTo (page, x3, y3)
    HPDF_Page_Stroke (page)
    HPDF_Page_SetDash (page, NULL, 0, 0)
    HPDF_Page_SetLineWidth (page, 1.5)
    HPDF_Page_MoveTo (page, x, y)
    HPDF_Page_CurveTo (page, x1, y1, x2, y2, x3, y3)
    HPDF_Page_Stroke (page)
    # save the document to a file
    HPDF_SaveToFile (pdf, fname)
    # clean up
    HPDF_Free (pdf)
    return 0
main()
###
## * << Haru Free PDF Library 2.0.0 >> -- ttfont_demo.c
## *
## * Copyright (c) 1999-2006 Takeshi Kanno <takeshi_kanno@est.hi-ho.ne.jp>
## *
## * Permission to use, copy, modify, distribute and sell this software
## * and its documentation for any purpose is hereby granted without fee,
## * provided that the above copyright notice appear in all copies and
## * that both that copyright notice and this permission notice appear
## * in supporting documentation.
## * It is provided "as is" without express or implied warranty.
## *
##
## port to python by Li Jun
## http://groups.google.com/group/pythoncia
import os, sys
from ctypes import *
up=2
def setlibpath(up):
    """Append the directory *up* levels above this file to sys.path so
    the 'haru' package resolves from a source checkout.

    Fix: build the relative path with os.path.join/os.pardir instead of
    the hard-coded Windows '\\..' suffix, which produced a bogus path on
    POSIX systems.
    """
    import sys
    base = os.path.split(os.path.realpath(__file__))[0]
    path = os.path.normpath(os.path.join(base, *([os.pardir] * up)))
    if path not in sys.path:
        sys.path.append(path)
setlibpath(up)
from haru import *
from haru.c_func import *
from haru.hpdf_errorcode import *
@HPDF_Error_Handler(None, HPDF_UINT, HPDF_UINT, c_void_p)
def error_handler (error_no, detail_no, user_data):
    """ctypes error callback registered with HPDF_New: print a readable
    description of the libharu error, free the document and exit.

    error_detail maps libharu error numbers to message strings;
    user_data is the (unused) opaque pointer passed by the library.
    """
    global pdf
    printf ("ERROR: %s, detail_no=%u\n", error_detail[error_no],
        detail_no)
    HPDF_Free (pdf)
    sys.exit(1)
def main ():
    """Build ttfont_demo.pdf: load the TrueType font named on the
    command line (optionally embedded with -E) and render alphabet,
    digit and pangram samples at several point sizes.

    Returns 0 on success, 1 on usage or initialisation errors.
    """
    global pdf
    SAMP_TXT = "The quick brown fox jumps over the lazy dog."
    if (len(sys.argv) < 2):
        printf("usage: ttfont_demo [path to font file] "
            "-E(embedding font).\n")
        return 1
    # Output file: this script's path with a .pdf extension.
    fname=os.path.realpath(sys.argv[0])
    fname=fname[:fname.rfind('.')]+'.pdf'
    pdf = HPDF_New (error_handler, NULL)
    if (not pdf):
        printf ("error: cannot create PdfDoc object\n")
        return 1
    # Add a new page object.
    page = HPDF_AddPage (pdf)
    title_font = HPDF_GetFont (pdf, "Helvetica", NULL)
    # "-E" as the second argument requests font embedding.
    if (len(sys.argv) > 2 and sys.argv[2]=="-E"):
        embed = HPDF_TRUE
    else:
        embed = HPDF_FALSE
    detail_font_name = HPDF_LoadTTFontFromFile (pdf, sys.argv[1], embed)
    detail_font = HPDF_GetFont (pdf, detail_font_name, NULL)
    HPDF_Page_SetFontAndSize (page, title_font, 10)
    HPDF_Page_BeginText (page)
    # Move the position of the text to top of the page.
    HPDF_Page_MoveTextPos(page, 10, 190)
    HPDF_Page_ShowText (page, detail_font_name)
    if (embed):
        HPDF_Page_ShowText (page, "(Embedded Subset)")
    HPDF_Page_SetFontAndSize (page, detail_font, 15)
    HPDF_Page_MoveTextPos (page, 10, -20)
    HPDF_Page_ShowText (page, "abcdefghijklmnopqrstuvwxyz")
    HPDF_Page_MoveTextPos (page, 0, -20)
    HPDF_Page_ShowText (page, "ABCDEFGHIJKLMNOPQRSTUVWXYZ")
    HPDF_Page_MoveTextPos (page, 0, -20)
    HPDF_Page_ShowText (page, "1234567890")
    HPDF_Page_MoveTextPos (page, 0, -20)
    # The pangram at increasing point sizes (10, 16, 23, 30).
    HPDF_Page_SetFontAndSize (page, detail_font, 10)
    HPDF_Page_ShowText (page, SAMP_TXT)
    HPDF_Page_MoveTextPos (page, 0, -18)
    HPDF_Page_SetFontAndSize (page, detail_font, 16)
    HPDF_Page_ShowText (page, SAMP_TXT)
    HPDF_Page_MoveTextPos (page, 0, -27)
    HPDF_Page_SetFontAndSize (page, detail_font, 23)
    HPDF_Page_ShowText (page, SAMP_TXT)
    HPDF_Page_MoveTextPos (page, 0, -36)
    HPDF_Page_SetFontAndSize (page, detail_font, 30)
    HPDF_Page_ShowText (page, SAMP_TXT)
    HPDF_Page_MoveTextPos (page, 0, -36)
    # Size the page to fit the widest sample (TextWidth is measured at
    # the current 30pt font size set just above).
    pw = HPDF_Page_TextWidth (page, SAMP_TXT)
    page_height = 210
    page_width = pw + 40
    HPDF_Page_SetWidth (page, page_width)
    HPDF_Page_SetHeight (page, page_height)
    # Finish to print text.
    HPDF_Page_EndText (page)
    # Two horizontal rules across the page.
    HPDF_Page_SetLineWidth (page, 0.5)
    HPDF_Page_MoveTo (page, 10, page_height - 25)
    HPDF_Page_LineTo (page, page_width - 10, page_height - 25)
    HPDF_Page_Stroke (page)
    HPDF_Page_MoveTo (page, 10, page_height - 85)
    HPDF_Page_LineTo (page, page_width - 10, page_height - 85)
    HPDF_Page_Stroke (page)
    HPDF_SaveToFile (pdf, fname)
    # clean up
    HPDF_Free (pdf)
    return 0
main() | Python |
###
## * << Haru Free PDF Library 2.0.0 >> -- arc_demo.c
## *
## * Copyright (c) 1999-2006 Takeshi Kanno <takeshi_kanno@est.hi-ho.ne.jp>
## *
## * Permission to use, copy, modify, distribute and sell this software
## * and its documentation for any purpose is hereby granted without fee,
## * provided that the above copyright notice appear in all copies and
## * that both that copyright notice and this permission notice appear
## * in supporting documentation.
## * It is provided "as is" without express or implied warranty.
## *
##
## port to python by Li Jun
## http://groups.google.com/group/pythoncia
import os, sys
from ctypes import *
up=2
def setlibpath(up):
    """Append the directory *up* levels above this file to sys.path so
    the 'haru' package resolves from a source checkout.

    Fix: build the relative path with os.path.join/os.pardir instead of
    the hard-coded Windows '\\..' suffix, which produced a bogus path on
    POSIX systems.
    """
    import sys
    base = os.path.split(os.path.realpath(__file__))[0]
    path = os.path.normpath(os.path.join(base, *([os.pardir] * up)))
    if path not in sys.path:
        sys.path.append(path)
setlibpath(up)
from haru import *
from haru.c_func import *
from haru.hpdf_errorcode import *
from grid_sheet import *
@HPDF_Error_Handler(None, HPDF_UINT, HPDF_UINT, c_void_p)
def error_handler (error_no, detail_no, user_data):
    """ctypes error callback registered with HPDF_New: print a readable
    description of the libharu error, free the document and exit.

    error_detail maps libharu error numbers to message strings;
    user_data is the (unused) opaque pointer passed by the library.
    """
    global pdf
    printf ("ERROR: %s, detail_no=%u\n", error_detail[error_no],
        detail_no)
    HPDF_Free (pdf)
    sys.exit(1)
def main ():
    """Build arc_demo.pdf: a pie chart drawn with HPDF_Page_Arc
    (45% red, 25% blue, 15% green, remainder yellow) over a grid,
    with a white center circle punched out on top."""
    global pdf
    # Output file: this script's path with a .pdf extension.
    fname=os.path.realpath(sys.argv[0])
    fname=fname[:fname.rfind('.')]+'.pdf'
    pdf = HPDF_New (error_handler, NULL)
    if (not pdf):
        printf ("error: cannot create PdfDoc object\n")
        return 1
    # add a new page object.
    page = HPDF_AddPage (pdf)
    HPDF_Page_SetHeight (page, 220)
    HPDF_Page_SetWidth (page, 200)
    # draw grid to the page
    print_grid (pdf, page)
    # draw pie chart
    # *
    # * A: 45% Red
    # * B: 25% Blue
    # * C: 15% green
    # * D: other yellow
    # A
    HPDF_Page_SetRGBFill (page, 1.0, 0, 0)
    HPDF_Page_MoveTo (page, 100, 100)
    HPDF_Page_LineTo (page, 100, 180)
    HPDF_Page_Arc (page, 100, 100, 80, 0, 360 * 0.45)
    # GetCurrentPos yields the end point of the arc; the next wedge
    # starts its straight edge from there.
    pos = HPDF_Page_GetCurrentPos (page)
    HPDF_Page_LineTo (page, 100, 100)
    HPDF_Page_Fill (page)
    # B
    HPDF_Page_SetRGBFill (page, 0, 0, 1.0)
    HPDF_Page_MoveTo (page, 100, 100)
    HPDF_Page_LineTo (page, pos.x, pos.y)
    HPDF_Page_Arc (page, 100, 100, 80, 360 * 0.45, 360 * 0.7)
    pos = HPDF_Page_GetCurrentPos (page)
    HPDF_Page_LineTo (page, 100, 100)
    HPDF_Page_Fill (page)
    # C
    HPDF_Page_SetRGBFill (page, 0, 1.0, 0)
    HPDF_Page_MoveTo (page, 100, 100)
    HPDF_Page_LineTo (page, pos.x, pos.y)
    HPDF_Page_Arc (page, 100, 100, 80, 360 * 0.7, 360 * 0.85)
    pos = HPDF_Page_GetCurrentPos (page)
    HPDF_Page_LineTo (page, 100, 100)
    HPDF_Page_Fill (page)
    # D
    HPDF_Page_SetRGBFill (page, 1.0, 1.0, 0)
    HPDF_Page_MoveTo (page, 100, 100)
    HPDF_Page_LineTo (page, pos.x, pos.y)
    HPDF_Page_Arc (page, 100, 100, 80, 360 * 0.85, 360)
    pos = HPDF_Page_GetCurrentPos (page)  # unused after the last wedge
    HPDF_Page_LineTo (page, 100, 100)
    HPDF_Page_Fill (page)
    # draw center circle
    HPDF_Page_SetGrayStroke (page, 0)
    HPDF_Page_SetGrayFill (page, 1)
    HPDF_Page_Circle (page, 100, 100, 30)
    HPDF_Page_Fill (page)
    # save the document to a file
    HPDF_SaveToFile (pdf, fname)
    # clean up
    HPDF_Free (pdf)
    return 0
main() | Python |
###
## * << Haru Free PDF Library 2.0.0 >> -- make_rawimage.c
## *
## * Copyright (c) 1999-2006 Takeshi Kanno <takeshi_kanno@est.hi-ho.ne.jp>
## *
## * Permission to use, copy, modify, distribute and sell this software
## * and its documentation for any purpose is hereby granted without fee,
## * provided that the above copyright notice appear in all copies and
## * that both that copyright notice and this permission notice appear
## * in supporting documentation.
## * It is provided "as is" without express or implied warranty.
## *
##
## port to python by Li Jun
## http://groups.google.com/group/pythoncia
import os, sys
from ctypes import *
up=2
def setlibpath(up):
    """Append the directory *up* levels above this file to sys.path so
    the 'haru' package resolves from a source checkout.

    Fix: build the relative path with os.path.join/os.pardir instead of
    the hard-coded Windows '\\..' suffix, which produced a bogus path on
    POSIX systems.
    """
    import sys
    base = os.path.split(os.path.realpath(__file__))[0]
    path = os.path.normpath(os.path.join(base, *([os.pardir] * up)))
    if path not in sys.path:
        sys.path.append(path)
setlibpath(up)
from haru import *
from haru.c_func import *
from haru.hpdf_errorcode import *
@HPDF_Error_Handler(None, HPDF_UINT, HPDF_UINT, c_void_p)
def error_handler (error_no, detail_no, user_data):
    """ctypes error callback registered with HPDF_New: print a readable
    description of the libharu error, free the document and exit.

    error_detail maps libharu error numbers to message strings;
    user_data is the (unused) opaque pointer passed by the library.
    """
    global pdf
    printf ("ERROR: %s, detail_no=%u\n", error_detail[error_no],
        detail_no)
    HPDF_Free (pdf)
    sys.exit(1)
def main ():
    """Dump the decoded raw sample data of a PNG to a file.

    usage: make_rawimage <in-file-name> <out-file-name>

    Returns 0 on success, 1 on usage or initialisation errors.
    """
    global pdf
    # Fix: both an input and an output name are required, so argv needs
    # at least 3 entries.  The original test of "< 2" let a missing
    # output name slip through to the unguarded sys.argv[2] below.
    if (len(sys.argv) < 3):
        printf ("usage: make_rawimage <in-file-name> <out-file-name>\n")
        return 1
    fname=os.path.realpath(sys.argv[0])
    fname=fname[:fname.rfind('.')]+'.pdf'
    pdf = HPDF_New (error_handler, NULL)
    if (not pdf):
        printf ("error: cannot create PdfDoc object\n")
        return 1
    # load image file.
    image = HPDF_LoadPngImageFromFile (pdf, sys.argv[1])
    iw = HPDF_Image_GetWidth (image)
    ih = HPDF_Image_GetHeight (image)
    bits_per_comp = HPDF_Image_GetBitsPerComponent (image)
    cs = HPDF_Image_GetColorSpace (image)
    printf ("width=%u\n", iw)
    printf ("height=%u\n", ih)
    printf ("bits_per_comp=%u\n", bits_per_comp)
    printf ("color_space=%s\n", cs)
    # save raw-data to file (uses the document's memory manager and the
    # image's internal stream -- library internals, as in the C demo)
    stream = HPDF_FileWriter_New (pdf.mmgr, sys.argv[2])
    if (not stream):
        printf ("cannot open %s\n", sys.argv[2])
    else:
        HPDF_Stream_WriteToStream(image.stream, stream, 0, NULL)
        HPDF_Stream_Free (stream)
    # clean up
    HPDF_Free (pdf)
    return 0
main() | Python |
###
## * << Alternative PDF Library 1.0.0 >> -- text_demo2.c
## *
## * Copyright (c) 1999-2006 Takeshi Kanno <takeshi_kanno@est.hi-ho.ne.jp>
## *
## * Permission to use, copy, modify, distribute and sell this software
## * and its documentation for any purpose is hereby granted without fee,
## * provided that the above copyright notice appear in all copies and
## * that both that copyright notice and this permission notice appear
## * in supporting documentation.
## * It is provided "as is" without express or implied warranty.
## *
##
## port to python by Li Jun
## http://groups.google.com/group/pythoncia
import os, sys
from ctypes import *
up=2
def setlibpath(up):
    """Append the directory *up* levels above this file to sys.path so
    the 'haru' package resolves from a source checkout.

    Fix: build the relative path with os.path.join/os.pardir instead of
    the hard-coded Windows '\\..' suffix, which produced a bogus path on
    POSIX systems.
    """
    import sys
    base = os.path.split(os.path.realpath(__file__))[0]
    path = os.path.normpath(os.path.join(base, *([os.pardir] * up)))
    if path not in sys.path:
        sys.path.append(path)
setlibpath(up)
from haru import *
from haru.c_func import *
from grid_sheet import *
from math import *
global pdf
@HPDF_Error_Handler(None, HPDF_UINT, HPDF_UINT, c_void_p)
def error_handler (error_no, detail_no, user_data):
    """ctypes error callback: report the libharu error and free the
    document.

    NOTE(review): unlike the other demos in this collection, this
    handler prints the raw error number (not error_detail[...]) and
    does not sys.exit() afterwards -- presumably intentional, but worth
    confirming.
    """
    printf ("ERROR: error_no=%04X, detail_no=%u\n", error_no,
        detail_no)
    HPDF_Free (pdf)
# Module-level counter for the markers emitted by PrintText.  The bare
# "global" at module scope is a harmless no-op kept from the original port.
global no
no = 0

def PrintText(page):
    """Show a numbered marker ".[n]x y" at the page's current text
    position, where x/y are the current text coordinates.

    Fix: declare ``no`` as global -- without the declaration, ``no += 1``
    made ``no`` function-local and raised UnboundLocalError on the
    first call.
    """
    global no
    pos = HPDF_Page_GetCurrentTextPos (page)
    no += 1
    buf = ".[%d]%0.2f %0.2f" % (no, pos.x, pos.y)
    HPDF_Page_ShowText(page, buf)
def _show_text_rect (page, font, rect, caption, text, align):
    """Outline *rect*, draw *caption* just above it at 10pt, then lay
    out *text* inside the rectangle at 13pt with the given HPDF text
    alignment.  Extracted from the six duplicated copies in main()."""
    HPDF_Page_Rectangle (page, rect.left, rect.bottom, rect.right - rect.left,
        rect.top - rect.bottom)
    HPDF_Page_Stroke (page)
    HPDF_Page_BeginText (page)
    HPDF_Page_SetFontAndSize (page, font, 10)
    HPDF_Page_TextOut (page, rect.left, rect.top + 3, caption)
    HPDF_Page_SetFontAndSize (page, font, 13)
    HPDF_Page_TextRect (page, rect.left, rect.top, rect.right, rect.bottom,
        text, align, NULL)
    HPDF_Page_EndText (page)

def main ():
    """Build text_demo2.pdf: HPDF_Page_TextRect alignment samples, the
    same text in skewed and rotated coordinate systems, and a string
    drawn character-by-character along a circle.

    Fix: angle-to-radian conversions now divide by 180.0.  With the
    integer literal 180 the divisions truncated to 0 under Python 2,
    silently flattening the skew, rotation and first circle-text angle.
    """
    global pdf
    rect=HPDF_Rect()
    SAMP_TXT = "The quick brown fox jumps over the lazy dog. "
    fname=os.path.realpath(sys.argv[0])
    fname=fname[:fname.rfind('.')]+'.pdf'
    pdf = HPDF_New (error_handler, NULL)
    if (not pdf):
        printf ("error: cannot create PdfDoc object\n")
        return 1
    # add a new page object.
    page = HPDF_AddPage (pdf)
    HPDF_Page_SetSize (page, HPDF_PAGE_SIZE_A5, HPDF_PAGE_PORTRAIT)
    print_grid (pdf, page)
    page_height = HPDF_Page_GetHeight (page)  # kept from the C demo (unused below)
    font = HPDF_GetFont (pdf, "Helvetica", NULL)
    HPDF_Page_SetTextLeading (page, 20)
    # HPDF_Page_TextRect with each alignment mode (caption strings are
    # kept verbatim from the original, including the RIGTH typo).
    rect.left = 25
    rect.top = 545
    rect.right = 200
    rect.bottom = rect.top - 40
    _show_text_rect (page, font, rect, "HPDF_TALIGN_LEFT", SAMP_TXT, HPDF_TALIGN_LEFT)
    rect.left = 220
    rect.right = 395
    _show_text_rect (page, font, rect, "HPDF_TALIGN_RIGTH", SAMP_TXT, HPDF_TALIGN_RIGHT)
    rect.left = 25
    rect.top = 475
    rect.right = 200
    rect.bottom = rect.top - 40
    _show_text_rect (page, font, rect, "HPDF_TALIGN_CENTER", SAMP_TXT, HPDF_TALIGN_CENTER)
    rect.left = 220
    rect.right = 395
    _show_text_rect (page, font, rect, "HPDF_TALIGN_JUSTIFY", SAMP_TXT, HPDF_TALIGN_JUSTIFY)
    # Skewed coordinate system
    HPDF_Page_GSave (page)
    angle1 = 5
    angle2 = 10
    rad1 = angle1 / 180.0 * 3.141592
    rad2 = angle2 / 180.0 * 3.141592
    HPDF_Page_Concat (page, 1, tan(rad1), tan(rad2), 1, 25, 350)
    rect.left = 0
    rect.top = 40
    rect.right = 175
    rect.bottom = 0
    _show_text_rect (page, font, rect, "Skewed coordinate system", SAMP_TXT, HPDF_TALIGN_LEFT)
    HPDF_Page_GRestore (page)
    # Rotated coordinate system
    HPDF_Page_GSave (page)
    angle1 = 5
    rad1 = angle1 / 180.0 * 3.141592
    HPDF_Page_Concat (page, cos(rad1), sin(rad1), -sin(rad1), cos(rad1), 220, 350)
    rect.left = 0
    rect.top = 40
    rect.right = 175
    rect.bottom = 0
    _show_text_rect (page, font, rect, "Rotated coordinate system", SAMP_TXT, HPDF_TALIGN_LEFT)
    HPDF_Page_GRestore (page)
    # text along a circle
    HPDF_Page_SetGrayStroke (page, 0)
    HPDF_Page_Circle (page, 210, 190, 145)
    HPDF_Page_Circle (page, 210, 190, 113)
    HPDF_Page_Stroke (page)
    angle1 = 360.0 / (len (SAMP_TXT))
    angle2 = 180
    HPDF_Page_BeginText (page)
    font = HPDF_GetFont (pdf, "Courier-Bold", NULL)
    HPDF_Page_SetFontAndSize (page, font, 30)
    for i in range(len (SAMP_TXT)):
        rad1 = (angle2 - 90) / 180.0 * 3.141592
        rad2 = angle2 / 180.0 * 3.141592
        x = 210 + cos(rad2) * 122
        y = 190 + sin(rad2) * 122
        HPDF_Page_SetTextMatrix(page, cos(rad1), sin(rad1), -sin(rad1), cos(rad1), x, y)
        # one character plus a trailing NUL, mimicking the C buffer
        buf = [SAMP_TXT[i], chr(0)]
        HPDF_Page_ShowText (page, buf)
        angle2 -= angle1
    HPDF_Page_EndText (page)
    # save the document to a file
    HPDF_SaveToFile (pdf, fname)
    # clean up
    HPDF_Free (pdf)
    return 0
main() | Python |
###
## * << Haru Free PDF Library 2.0.0 >> -- link_annotation.c
## *
## * Copyright (c) 1999-2006 Takeshi Kanno <takeshi_kanno@est.hi-ho.ne.jp>
## *
## * Permission to use, copy, modify, distribute and sell this software
## * and its documentation for any purpose is hereby granted without fee,
## * provided that the above copyright notice appear in all copies and
## * that both that copyright notice and this permission notice appear
## * in supporting documentation.
## * It is provided "as is" without express or implied warranty.
## *
##
## port to python by Li Jun
## http://groups.google.com/group/pythoncia
import os, sys
from ctypes import *
up=2
def setlibpath(up):
    """Append the directory *up* levels above this file to sys.path so
    the 'haru' package resolves from a source checkout.

    Fix: build the relative path with os.path.join/os.pardir instead of
    the hard-coded Windows '\\..' suffix, which produced a bogus path on
    POSIX systems.
    """
    import sys
    base = os.path.split(os.path.realpath(__file__))[0]
    path = os.path.normpath(os.path.join(base, *([os.pardir] * up)))
    if path not in sys.path:
        sys.path.append(path)
setlibpath(up)
from haru import *
from haru.c_func import *
from haru.hpdf_errorcode import *
@HPDF_Error_Handler(None, HPDF_UINT, HPDF_UINT, c_void_p)
def error_handler (error_no, detail_no, user_data):
    """ctypes error callback registered with HPDF_New: print a readable
    description of the libharu error, free the document and exit.

    error_detail maps libharu error numbers to message strings;
    user_data is the (unused) opaque pointer passed by the library.
    """
    global pdf
    printf ("ERROR: %s, detail_no=%u\n", error_detail[error_no],
        detail_no)
    HPDF_Free (pdf)
    sys.exit(1)
def print_page (page, font, page_num):
    """Format one 200x200 content page showing "Page:<n>" so the index
    page's link annotations have a visible destination to jump to."""
    HPDF_Page_SetWidth (page, 200)
    HPDF_Page_SetHeight (page, 200)
    HPDF_Page_SetFontAndSize (page, font, 20)
    HPDF_Page_BeginText (page)
    HPDF_Page_MoveTextPos (page, 50, 150)
    buf= "Page:%d" % page_num
    HPDF_Page_ShowText (page, buf)
    HPDF_Page_EndText (page)
def _make_page_link (index_page, target_page, caption):
    """Show *caption* on the index page, cover it with a link annotation
    that jumps to *target_page*, move to the next line and return the
    annotation so the caller can style it.

    Extracted from the seven duplicated copies in main(); the rectangle
    pads the shown text by 4 units left/right/bottom and 10 above the
    baseline, exactly as before.
    """
    tp = HPDF_Page_GetCurrentTextPos (index_page)
    HPDF_Page_ShowText (index_page, caption)
    rect = HPDF_Rect()
    rect.left = tp.x - 4
    rect.bottom = tp.y - 4
    rect.right = HPDF_Page_GetCurrentTextPos (index_page).x + 4
    rect.top = tp.y + 10
    HPDF_Page_MoveToNextLine (index_page)
    dst = HPDF_Page_CreateDestination (target_page)
    return HPDF_Page_CreateLinkAnnot (index_page, rect, dst)

def main():
    """Build link_annotation.pdf: an index page whose entries jump to
    seven content pages, demonstrating the highlight modes and border
    styles of link annotations, plus one external URI link."""
    global pdf
    fname=os.path.realpath(sys.argv[0])
    fname=fname[:fname.rfind('.')]+'.pdf'
    page=[None for i in range(9)]
    rect=HPDF_Rect()
    uri = "http://libharu.org"
    pdf = HPDF_New (error_handler, NULL)
    if (not pdf):
        printf ("error: cannot create PdfDoc object\n")
        return 1
    # create default-font
    font = HPDF_GetFont (pdf, "Helvetica", NULL)
    # create index page
    index_page = HPDF_AddPage (pdf)
    HPDF_Page_SetWidth (index_page, 300)
    HPDF_Page_SetHeight (index_page, 220)
    # Add 7 pages to the document.
    for i in range(7):
        page[i] = HPDF_AddPage (pdf)
        print_page(page[i], font, i + 1)
    HPDF_Page_BeginText (index_page)
    HPDF_Page_SetFontAndSize (index_page, font, 10)
    HPDF_Page_MoveTextPos (index_page, 15, 200)
    HPDF_Page_ShowText (index_page, "Link Annotation Demo")
    HPDF_Page_EndText (index_page)
    ##
    # * Create Link-Annotation object on index page.
    #
    HPDF_Page_BeginText(index_page)
    HPDF_Page_SetFontAndSize (index_page, font, 8)
    HPDF_Page_MoveTextPos (index_page, 20, 180)
    HPDF_Page_SetTextLeading (index_page, 23)
    # pages 1-4: one entry per highlight mode
    annot = _make_page_link (index_page, page[0], "Jump to Page1 (HilightMode=HPDF_ANNOT_NO_HIGHTLIGHT)")
    HPDF_LinkAnnot_SetHighlightMode (annot, HPDF_ANNOT_NO_HIGHTLIGHT)
    annot = _make_page_link (index_page, page[1], "Jump to Page2 (HilightMode=HPDF_ANNOT_INVERT_BOX)")
    HPDF_LinkAnnot_SetHighlightMode (annot, HPDF_ANNOT_INVERT_BOX)
    annot = _make_page_link (index_page, page[2], "Jump to Page3 (HilightMode=HPDF_ANNOT_INVERT_BORDER)")
    HPDF_LinkAnnot_SetHighlightMode (annot, HPDF_ANNOT_INVERT_BORDER)
    annot = _make_page_link (index_page, page[3], "Jump to Page4 (HilightMode=HPDF_ANNOT_DOWN_APPEARANCE)")
    HPDF_LinkAnnot_SetHighlightMode (annot, HPDF_ANNOT_DOWN_APPEARANCE)
    # pages 5-7: one entry per border style (width, dash-on, dash-off)
    annot = _make_page_link (index_page, page[4], "Jump to Page5 (dash border)")
    HPDF_LinkAnnot_SetBorderStyle (annot, 1, 3, 2)
    annot = _make_page_link (index_page, page[5], "Jump to Page6 (no border)")
    HPDF_LinkAnnot_SetBorderStyle (annot, 0, 0, 0)
    annot = _make_page_link (index_page, page[6], "Jump to Page7 (bold border)")
    HPDF_LinkAnnot_SetBorderStyle (annot, 2, 0, 0)
    # URI link: no page destination and no line advance afterwards
    tp = HPDF_Page_GetCurrentTextPos (index_page)
    HPDF_Page_ShowText (index_page, "URI (")
    HPDF_Page_ShowText (index_page, uri)
    HPDF_Page_ShowText (index_page, ")")
    rect.left = tp.x - 4
    rect.bottom = tp.y - 4
    rect.right = HPDF_Page_GetCurrentTextPos (index_page).x + 4
    rect.top = tp.y + 10
    HPDF_Page_CreateURILinkAnnot (index_page, rect, uri)
    HPDF_Page_EndText (index_page)
    # save the document to a file
    HPDF_SaveToFile (pdf, fname)
    # clean up
    HPDF_Free (pdf)
    return 0
main()
| Python |
#====================================================================
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# ====================================================================
#
# This software consists of voluntary contributions made by many
# individuals on behalf of the Apache Software Foundation. For more
# information on the Apache Software Foundation, please see
# <http://www.apache.org/>.
#
import os
import re
import tempfile
import shutil
# Directories that the recursive walk must skip (VCS and build output).
ignore_pattern = re.compile('^(.svn|target|bin|classes)')
# File names ending in ".java" (matched from the start of the name).
java_pattern = re.compile('^.*\.java')
# Import lines that should be rewritten to the JCIP package.
annot_pattern = re.compile('import org\.apache\.http\.annotation\.')

def process_dir(dir):
    """Recursively rewrite every .java file under *dir*, skipping the
    directories matched by ignore_pattern."""
    for name in os.listdir(dir):
        path = os.path.join(dir, name)
        if os.path.isdir(path):
            if not ignore_pattern.match(name):
                process_dir(path)
        elif java_pattern.match(name):
            process_source(path)

def process_source(filename):
    """Replace 'import org.apache.http.annotation.' with
    'import net.jcip.annotations.' in *filename*.

    The rewrite goes through a temporary file and replaces the original
    only if at least one line actually changed; otherwise the temporary
    file is discarded.

    Fix: on error the temporary file is removed and the exception is
    RE-RAISED -- the original bare except silently swallowed every
    failure (including KeyboardInterrupt).
    """
    tmpfd, tmpfile = tempfile.mkstemp()
    try:
        changed = False
        dst = os.fdopen(tmpfd, 'w')
        try:
            src = open(filename)
            try:
                for line in src:
                    if annot_pattern.match(line):
                        changed = True
                        line = line.replace('import org.apache.http.annotation.', 'import net.jcip.annotations.')
                    dst.write(line)
            finally:
                src.close()
        finally:
            dst.close()
        if changed:
            shutil.move(tmpfile, filename)
        else:
            os.remove(tmpfile)
    except:
        os.remove(tmpfile)
        raise
process_dir('.')
| Python |
from setuptools import setup, find_packages
# Pull the version string out of the backend package itself so the
# metadata can never drift from the code.
version = __import__('sqlserver_ado').get_version()
setup(
    name='django-mssql',
    # Replace spaces with hyphens, presumably because released version
    # strings may not contain spaces (e.g. "1.0 beta" -> "1.0-beta").
    version=version.replace(' ', '-'),
    maintainer='Michael Manfre',
    maintainer_email='mmanfre@gmail.com',
    url='http://django-mssql.googlecode.com/',
    description="Django backend database support for MS SQL 2005 and up.",
    license='Apache License, Version 2.0',
    packages=find_packages(),
    include_package_data=True,
    classifiers=[
        "Programming Language :: Python",
        "Topic :: Software Development :: Libraries :: Python Modules",
        "Framework :: Django",
        "Environment :: Web Environment",
    ],
)
| Python |
"""An example of using the internal DB-API module without any Django."""
# Adds the relative path for the MS SQL Server backend to Python's import path.
# We do this so we can run this module from a checkout for demo purposes
# without having to install it.
def _hack_backend_path():
    """Make the MS SQL backend importable from a source checkout by
    appending ../source (resolved against the working directory) to
    sys.path."""
    import os
    import sys
    source_dir = os.path.join(os.path.abspath(os.path.dirname(".")), "../source")
    sys.path.append(source_dir)
# Import the dbapi module, after hacking the import path.
_hack_backend_path()
import sqlserver_ado.dbapi as db
def _print_names(results):
    """Print column 1 (the name) of every row in *results*.

    Uses print() call syntax, which behaves identically under Python 2
    and also works under Python 3 (the original print statement did not).
    """
    for item in results:
        print(item[1])
def sproc_1(connection):
    """Invoke the uspAppUser_GetAll sproc via cursor.execute() with an
    explicit %s parameter marker and print the returned names."""
    cursor = connection.cursor()
    cursor.execute('uspAppUser_GetAll %s', ['current_user'])
    rows = cursor.fetchall()
    _print_names(rows)
    cursor.close()
def sproc_1b(connection):
    """Invoke uspAppUser_GetAll via cursor.execute() with an explicit
    %s marker, passing NULL (None) as the parameter value."""
    cursor = connection.cursor()
    cursor.execute('uspAppUser_GetAll %s', [None])
    rows = cursor.fetchall()
    _print_names(rows)
    cursor.close()
def sproc_2(connection):
    """Invoke uspAppUser_GetAll through the DB-API callproc() interface
    and print the returned names."""
    cursor = connection.cursor()
    cursor.callproc('uspAppUser_GetAll', ['current_user'])
    rows = cursor.fetchall()
    _print_names(rows)
    cursor.close()
def sproc_2b(connection):
    """Invoke uspAppUser_GetAll through callproc() with an integer
    argument and print the returned names."""
    cursor = connection.cursor()
    cursor.callproc('uspAppUser_GetAll', [0])
    rows = cursor.fetchall()
    _print_names(rows)
    cursor.close()
def main():
    """Connect to the named local instance localhost\\ss2005 over OLE DB
    with integrated security and run the callproc demo (sproc_2b).
    Connection details are hard-coded; adjust for your machine."""
    connection = db.connect("PROVIDER=SQLOLEDB;DATA SOURCE=localhost\\ss2005;Initial Catalog=Ted;Integrated Security=SSPI")
    sproc_2b(connection)
    connection.close()
main()
| Python |
# Common database settings for all test projects
# plus a path hack to find the backend module.
import os
# use old style settings for non-django dbapi tests
DATABASE_NAME = 'django_test_backend'
# Host is this machine's named instance; SQLINSTANCE overrides 'ss2008'.
# NOTE(review): COMPUTERNAME is Windows-only -- importing this module on
# another OS raises KeyError.
DATABASE_HOST = os.environ['COMPUTERNAME'] + '\\' + os.environ.get('SQLINSTANCE', 'ss2008')
DATABASE_USER = ''  # blank user/password selects integrated security
DATABASE_PASSWORD = ''
DATABASE_COMMAND_TIMEOUT = 30
DATABASE_ENGINE = 'sqlserver_ado'
# django required database settings
DATABASES = {
    'default': {
        'NAME': DATABASE_NAME,
        'ENGINE': 'sqlserver_ado',
        'HOST': DATABASE_HOST,
        'USER': DATABASE_USER,
        'PASSWORD': DATABASE_PASSWORD,
        'COMMAND_TIMEOUT': DATABASE_COMMAND_TIMEOUT,
        'OPTIONS' : {
            #'provider': 'SQLNCLI10',
            #'extra_params': 'DataTypeCompatibility=80;MARS Connection=True;',
        },
    }
}
# Adds the relative path for the MS SQL Server backend to Python's import path.
# Note that it pops up two levels because this file is imported from modules another level down,
# not directly.
def _hack_backend_path():
    """Make the MS SQL backend importable from a source checkout.

    Pops up two levels (../../source, resolved against the working
    directory) because this settings module is imported from test
    projects another level down, and appends it to sys.path."""
    import os
    import sys
    source_dir = os.path.join(os.path.abspath(os.path.dirname(".")), "../../source")
    sys.path.append(source_dir)
_hack_backend_path()
def make_connection_string():
    """Build an OLE DB connection string from DATABASES['default'].

    This function duplicates the Django connection string logic, but is
    meant to be used by non-Django tests that want to share test db
    settings.  Raises Exception when NAME is missing or when PORT is
    given with a non-IP host.
    """
    settings = DATABASES.get('default', {})
    db_host = settings.get('HOST', '127.0.0.1')
    db_port = settings.get('PORT', '')
    db_name = settings.get('NAME', '')
    db_user = settings.get('USER', '')
    db_pass = settings.get('PASSWORD', '')
    if db_name == '':
        raise Exception("You need to specify a DATABASE_NAME in your Django settings file.")
    # If a port is given, force a TCP/IP connection. The host should be
    # an IP address in this case.
    datasource = db_host
    if db_port != '':
        if not _looks_like_ipaddress(db_host):
            raise Exception("When using DATABASE_PORT, DATABASE_HOST must be an IP address.")
        # Fix: actually use the "host,port" data source.  The original
        # built this string and then interpolated db_host below, so
        # DATABASE_PORT never took effect.
        datasource = '%s,%s;Network Library=DBMSSOCN' % (db_host, db_port)
    # If no user is specified, use integrated security.
    if db_user != '':
        auth_string = "UID=%s;PWD=%s" % (db_user, db_pass)
    else:
        auth_string = "Integrated Security=SSPI"
    return "PROVIDER=SQLOLEDB;DATA SOURCE=%s;Initial Catalog=%s;%s" % \
        (datasource, db_name, auth_string)
| Python |
# This dictionary maps Field objects to their associated Server Server column
# types, as strings. Column-type strings can contain format strings; they'll
# be interpolated against the values of Field.__dict__.
from django.conf import settings
from django.db.backends.creation import BaseDatabaseCreation, TEST_DATABASE_PREFIX
import sys
class DatabaseCreation(BaseDatabaseCreation):
    """Test-database creation/destruction for the SQL Server ADO backend.

    CREATE DATABASE / DROP DATABASE cannot run inside a transaction on
    this backend, so those statements are bracketed by
    _disable_transactions() / _reenable_transactions().
    """
    # Maps Django Field class names to SQL Server column types; the
    # %(...)s format strings are interpolated against Field.__dict__.
    data_types = {
        'AutoField': 'int IDENTITY (1, 1)',
        'BigAutoField': 'bigint IDENTITY (1, 1)',
        'BigIntegerField': 'bigint',
        'BooleanField': 'bit',
        'CharField': 'nvarchar(%(max_length)s)',
        'CommaSeparatedIntegerField': 'nvarchar(%(max_length)s)',
        'DateField': 'datetime',
        'DateTimeField': 'datetime',
        'DecimalField': 'decimal(%(max_digits)s, %(decimal_places)s)',
        'FileField': 'nvarchar(%(max_length)s)',
        'FilePathField': 'nvarchar(%(max_length)s)',
        'FloatField': 'double precision',
        'IntegerField': 'int',
        'IPAddressField': 'nvarchar(15)',
        'NullBooleanField': 'bit',
        'OneToOneField': 'int',
        'PositiveIntegerField': 'int CHECK ([%(column)s] >= 0)',
        'PositiveSmallIntegerField': 'smallint CHECK ([%(column)s] >= 0)',
        'SlugField': 'nvarchar(%(max_length)s)',
        'SmallIntegerField': 'smallint',
        'TextField': 'nvarchar(max)',
        'TimeField': 'datetime',
    }
    def _disable_transactions(self, verbosity=1):
        """Temporarily turn off transactions for non-transactionable SQL"""
        if self.connection.connection.supportsTransactions:
            if verbosity >= 1:
                print "Disabling Transactions"
            # Remember the current setting so _reenable_transactions()
            # can restore it, and commit pending work first.
            self._supports_transactions = self.connection.connection.supportsTransactions
            self.connection._commit()
            self.connection.connection.supportsTransactions = False
    def _reenable_transactions(self, verbosity=1):
        """Reset transaction support to state prior to _disable_transactions() call"""
        if hasattr(self, '_supports_transactions'):
            if verbosity >= 1:
                print "Re-enabling Transactions"
            self.connection.connection.supportsTransactions = self._supports_transactions
    def _create_test_db(self, verbosity=1, autoclobber=False):
        """Create the test database (optionally destroying a stale one)
        and return its name.  Honors _test_database_create()."""
        test_database_name = self._test_database_name(settings)
        if not self._test_database_create(settings):
            if verbosity >= 1:
                print "Skipping Test DB creation"
            return test_database_name
        # Create the test database and connect to it. We need to autocommit
        # if the database supports it because PostgreSQL doesn't allow
        # CREATE/DROP DATABASE statements within transactions.
        cursor = self.connection.cursor()
        suffix = self.sql_table_creation_suffix()
        qn = self.connection.ops.quote_name
        try:
            self._disable_transactions()
            cursor.execute("CREATE DATABASE %s %s" % (qn(test_database_name), suffix))
            self._reenable_transactions()
        except Exception, e:
            sys.stderr.write("Got an error creating the test database: %s\n" % e)
            if not autoclobber:
                confirm = raw_input("Type 'yes' if you would like to try deleting the test database '%s', or 'no' to cancel: " % test_database_name)
            if autoclobber or confirm == 'yes':
                try:
                    self._disable_transactions()
                    if verbosity >= 1:
                        print "Destroying old test database..."
                    cursor.execute("DROP DATABASE %s" % qn(test_database_name))
                    if verbosity >= 1:
                        print "Creating test database..."
                    cursor.execute("CREATE DATABASE %s %s" % (qn(test_database_name), suffix))
                    self._reenable_transactions()
                except Exception, e:
                    sys.stderr.write("Got an error recreating the test database: %s\n" % e)
                    sys.exit(2)
            else:
                print "Tests cancelled."
                sys.exit(1)
        return test_database_name
    def _destroy_test_db(self, test_database_name, verbosity=1):
        "Internal implementation - remove the test db tables."
        if self._test_database_create(settings):
            qn = self.connection.ops.quote_name
            # Remove the test database to clean up after
            # ourselves. Connect to the previous database (not the test database)
            # to do so, because it's not allowed to delete a database while being
            # connected to it.
            cursor = self.connection.cursor()
            self.set_autocommit()
            import time
            time.sleep(1) # To avoid "database is being accessed by other users" errors.
            self._disable_transactions()
            cursor.execute("DROP DATABASE %s" % self.connection.ops.quote_name(test_database_name))
            self._reenable_transactions()
            self.connection.close()
        else:
            print "Skipping Test DB destruction"
    def _test_database_create(self, settings):
        """Return True when the test DB should be (re)created.

        Per-database TEST_CREATE wins over the project-wide
        TEST_DATABASE_CREATE setting; the default is True.
        """
        if self.connection.settings_dict.has_key('TEST_CREATE'):
            return self.connection.settings_dict.get('TEST_CREATE', True)
        if hasattr(settings, 'TEST_DATABASE_CREATE'):
            return settings.TEST_DATABASE_CREATE
        else:
            return True
    def _test_database_name(self, settings):
        """Resolve the test database name: per-database TEST_NAME if set,
        else prefixed NAME, falling back to the legacy project-wide
        settings when the per-database dict is unavailable."""
        try:
            name = TEST_DATABASE_PREFIX + self.connection.settings_dict['NAME']
            if self.connection.settings_dict['TEST_NAME']:
                name = self.connection.settings_dict['TEST_NAME']
        except AttributeError:
            if hasattr(settings, 'TEST_DATABASE_NAME') and settings.TEST_DATABASE_NAME:
                name = settings.TEST_DATABASE_NAME
            else:
                name = TEST_DATABASE_PREFIX + settings.DATABASE_NAME
        return name
| Python |
from django.db.models.sql import compiler
import re
# query_class returns the base class to use for Django queries.
# The custom 'SqlServerQuery' class derives from django.db.models.sql.query.Query
# which is passed in as "QueryClass" by Django itself.
#
# SqlServerQuery overrides:
# ...insert queries to add "SET IDENTITY_INSERT" if needed.
# ...select queries to emulate LIMIT/OFFSET for sliced queries.
# Matches an optional trailing "ORDER BY ... LIMIT n OFFSET n" at the very
# end of a compiled query; groups are (order_by, limit, offset).
_re_order_limit_offset = re.compile(
    r'(?:ORDER BY\s+(.+?))?\s*(?:LIMIT\s+(\d+))?\s*(?:OFFSET\s+(\d+))?$')

# Pattern to find the quoted column name at the end of a field specification
_re_pat_col = re.compile(r"\[([^[]+)\]$")

# Pattern to find each of the parts of a column name (extra_select, table, field)
_re_pat_col_parts = re.compile(
    r'(?:' +
    r'(\([^\)]+\))\s+as\s+' +
    r'|(\[[^[]+\])\.' +
    r')?' +
    r'\[([^[]+)\]$',
    re.IGNORECASE
)

# Pattern used in column aliasing to find sub-select placeholders
_re_col_placeholder = re.compile(r'\{_placeholder_(\d+)\}')
def _break(s, find):
"""Break a string s into the part before the substring to find,
and the part including and after the substring."""
i = s.find(find)
return s[:i], s[i:]
def _get_order_limit_offset(sql):
    """Return the (order_by, limit, offset) groups parsed from the end of sql."""
    match = _re_order_limit_offset.search(sql)
    return match.groups()
def _remove_order_limit_offset(sql):
    """Strip trailing ORDER BY/LIMIT/OFFSET and the leading SELECT keyword."""
    without_tail = _re_order_limit_offset.sub('', sql)
    # Drop the first whitespace-delimited token ("SELECT"), keep the rest.
    return without_tail.split(None, 1)[1]
class SQLCompiler(compiler.SQLCompiler):
    """SELECT compiler that emulates LIMIT/OFFSET for SQL Server.

    Sliced querysets are rewritten either with TOP (high mark only) or
    with a ROW_NUMBER() window function wrapped in nested subselects.
    """

    def resolve_columns(self, row, fields=()):
        """Strip the synthetic leading _row_num column from sliced resultsets."""
        # If the results are sliced, the resultset will have an initial
        # "row number" column. Remove this column before the ORM sees it.
        if self._using_row_number:
            return row[1:]
        return row

    def as_sql(self, with_limits=True, with_col_aliases=False):
        """Build (sql, params), rewriting sliced queries for SQL Server.

        Three cases:
        - no slicing: defer entirely to the base compiler;
        - high mark only: prepend "TOP n" to the SELECT;
        - low mark present: wrap the query in ROW_NUMBER() subselects.
        """
        self._using_row_number = False

        # Get out of the way if we're not a select query or there's no limiting involved.
        check_limits = with_limits and (self.query.low_mark or self.query.high_mark is not None)
        if not check_limits:
            return super(SQLCompiler, self).as_sql(with_limits, with_col_aliases)

        # Compile without limits; the limiting is spliced in below.
        raw_sql, fields = super(SQLCompiler, self).as_sql(False, with_col_aliases)

        # Check for high mark only and replace with "TOP"
        if self.query.high_mark is not None and not self.query.low_mark:
            _select = 'SELECT'
            if self.query.distinct:
                _select += ' DISTINCT'
            sql = re.sub(r'(?i)^%s' % _select, '%s TOP %s' % (_select, self.query.high_mark), raw_sql, 1)
            return sql, fields

        # Else we have limits; rewrite the query using ROW_NUMBER()
        self._using_row_number = True

        order, limit_ignore, offset_ignore = _get_order_limit_offset(raw_sql)

        qn = self.connection.ops.quote_name
        inner_table_name = qn('AAAA')

        # Using ROW_NUMBER requires an ordering
        if order is None:
            # No ORDER BY in the query: default to ordering by primary key.
            meta = self.query.get_meta()
            column = meta.pk.db_column or meta.pk.get_attname()
            order = '%s.%s ASC' % (inner_table_name, qn(column))
        else:
            # remap order for injected subselect
            new_order = []
            for x in order.split(','):
                if x.find('.') != -1:
                    tbl, col = x.rsplit('.', 1)
                else:
                    col = x
                new_order.append('%s.%s' % (inner_table_name, col))
            order = ', '.join(new_order)

        where_row_num = "%s < _row_num" % (self.query.low_mark)
        if self.query.high_mark:
            where_row_num += " and _row_num <= %s" % (self.query.high_mark)

        # Lop off ORDER... and the initial "SELECT"
        inner_select = _remove_order_limit_offset(raw_sql)
        outer_fields, inner_select = self._alias_columns(inner_select)

        # map a copy of outer_fields for injected subselect
        f = []
        for x in outer_fields.split(','):
            # Keep only the alias part of "expr AS alias" entries.
            i = x.find(' AS ')
            if i != -1:
                x = x[i+4:]
            if x.find('.') != -1:
                tbl, col = x.rsplit('.', 1)
            else:
                col = x
            f.append('%s.%s' % (inner_table_name, col.strip()))

        # inject a subselect to get around OVER requiring ORDER BY to come from FROM
        inner_select = '%s FROM ( SELECT %s ) AS %s'\
            % (', '.join(f), inner_select, inner_table_name)

        sql = "SELECT _row_num, %s FROM ( SELECT ROW_NUMBER() OVER ( ORDER BY %s) as _row_num, %s) as QQQ where %s"\
            % (outer_fields, order, inner_select, where_row_num)

        return sql, fields

    def _alias_columns(self, sql):
        """Return tuple of SELECT and FROM clauses, aliasing duplicate column names."""
        qn = self.connection.ops.quote_name

        outer = list()        # column references for the outer SELECT
        inner = list()        # possibly-aliased columns for the inner SELECT
        names_seen = list()   # lowercased column names already emitted

        # replace all parens with placeholders
        paren_depth, paren_buf = 0, ['']
        parens, i = {}, 0
        for ch in sql:
            if ch == '(':
                i += 1
                paren_depth += 1
                paren_buf.append('')
            elif ch == ')':
                paren_depth -= 1
                key = '_placeholder_{0}'.format(i)
                buf = paren_buf.pop()

                # store the expanded paren string
                parens[key] = buf.format(**parens)
                paren_buf[paren_depth] += '({' + key + '})'
            else:
                paren_buf[paren_depth] += ch

        def _replace_sub(col):
            """Replace all placeholders with expanded values"""
            while True:
                m = _re_col_placeholder.search(col)
                if m:
                    try:
                        key = '_placeholder_{0}'.format(
                            int(m.group(1))
                        )
                        col = col.format(**{
                            key : parens[key]
                        })
                    except:
                        # not a substituted value
                        break
                else:
                    break
            return col

        temp_sql = ''.join(paren_buf)

        select_list, from_clause = _break(temp_sql, ' FROM [')

        for col in [x.strip() for x in select_list.split(',')]:
            match = _re_pat_col.search(col)
            if match:
                col_name = match.group(1)
                col_key = col_name.lower()
                if col_key in names_seen:
                    # Duplicate column name: give it a unique counter-suffixed alias.
                    alias = qn('%s___%s' % (col_name, names_seen.count(col_key)))
                    outer.append(alias)
                    col = _replace_sub(col)
                    inner.append("%s as %s" % (col, alias))
                else:
                    replaced = _replace_sub(col)
                    outer.append(qn(col_name))
                    inner.append(replaced)
                names_seen.append(col_key)
            else:
                raise Exception('Unable to find a column name when parsing SQL: {0}'.format(col))

        return ', '.join(outer), ', '.join(inner) + from_clause.format(**parens)
class SQLInsertCompiler(compiler.SQLInsertCompiler, SQLCompiler):
    """INSERT compiler that supports explicit values for IDENTITY columns."""

    def as_sql(self, *args, **kwargs):
        # Fix for Django ticket #14019
        if not hasattr(self, 'return_id'):
            self.return_id = False

        sql, params = super(SQLInsertCompiler, self).as_sql(*args, **kwargs)

        meta = self.query.get_meta()
        if meta.has_auto_field:
            # db_column is None if not explicitly specified by model field
            auto_field_column = meta.auto_field.db_column or meta.auto_field.column

            if auto_field_column in self.query.columns:
                # Inserting into an IDENTITY column requires SET
                # IDENTITY_INSERT to be toggled around the statement.
                quoted_table = self.connection.ops.quote_name(meta.db_table)
                sql = "SET IDENTITY_INSERT %s ON;%s;SET IDENTITY_INSERT %s OFF" %\
                    (quoted_table, sql, quoted_table)

        return sql, params
# The remaining statement types need no SQL Server specific rewriting of
# their own; they only inherit the slicing/column behaviour of SQLCompiler.
class SQLDeleteCompiler(compiler.SQLDeleteCompiler, SQLCompiler):
    pass

class SQLUpdateCompiler(compiler.SQLUpdateCompiler, SQLCompiler):
    pass

class SQLAggregateCompiler(compiler.SQLAggregateCompiler, SQLCompiler):
    pass

class SQLDateCompiler(compiler.SQLDateCompiler, SQLCompiler):
    pass
| Python |
"""A DB API 2.0 interface to SQL Server for Django
Forked from: adodbapi v2.1
Copyright (C) 2002 Henrik Ekelund, version 2.1 by Vernon Cole
* http://adodbapi.sourceforge.net/
* http://sourceforge.net/projects/pywin32/
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
* Version 2.1D by Adam Vandenberg, forked for Django backend use.
This module is a db-api 2 interface for ADO, but is Django & SQL Server.
It won't work against other ADO sources (like Access.)
DB-API 2.0 specification: http://www.python.org/dev/peps/pep-0249/
"""
import sys
import time
import datetime
import re
try:
import decimal
except ImportError:
from django.utils import _decimal as decimal
from django.db.utils import IntegrityError as DjangoIntegrityError
import pythoncom
import win32com.client
from ado_consts import *
# DB API default values
apilevel = '2.0'

# 1: Threads may share the module, but not connections.
threadsafety = 1

# The underlying ADO library expects parameters as '?', but this wrapper
# expects '%s' parameters. This wrapper takes care of the conversion.
paramstyle = 'format'

# Set defaultIsolationLevel on module level before creating the connection.
# It may be one of "adXact..." consts.
defaultIsolationLevel = adXactReadCommitted

# Set defaultCursorLocation on module level before creating the connection.
# It may be one of the "adUse..." consts.
defaultCursorLocation = adUseServer

# Used for COM to Python date conversions.
# COM dates are floats counting days; this is the ordinal of 1899-12-30,
# the COM date epoch.
_ordinal_1899_12_31 = datetime.date(1899,12,31).toordinal()-1
_milliseconds_per_day = 24*60*60*1000
class MultiMap(object):
    """Defines a mapping with multiple keys per value.

    mapping is a dict of: tuple(key, key, key...) => value

    Lookups of unregistered keys return ``default`` instead of raising.
    """
    def __init__(self, mapping, default=None):
        self.storage = dict()
        self.default = default

        # items() instead of the Python-2-only iteritems(): identical
        # behaviour here, and portable to Python 3.
        for keys, value in mapping.items():
            for key in keys:
                self.storage[key] = value

    def __getitem__(self, key):
        """Return the value registered for key, or the default."""
        return self.storage.get(key, self.default)
def standardErrorHandler(connection, cursor, errorclass, errorvalue):
    """Default DB-API error handler: record the error, then raise it.

    The (class, value) pair is appended to the connection's messages list,
    and to the cursor's as well when a cursor is involved.
    """
    error = (errorclass, errorvalue)
    connection.messages.append(error)
    if cursor is not None:
        cursor.messages.append(error)
    raise errorclass(errorvalue)
# DB-API 2.0 exception hierarchy (PEP 249), rooted at Error and Warning.
class Error(StandardError): pass
class Warning(StandardError): pass

class InterfaceError(Error): pass
class DatabaseError(Error): pass

class InternalError(DatabaseError): pass
class OperationalError(DatabaseError): pass
class ProgrammingError(DatabaseError): pass
# Also derives from Django's IntegrityError so Django-level
# "except IntegrityError" clauses catch it too.
class IntegrityError(DatabaseError, DjangoIntegrityError): pass
class DataError(DatabaseError): pass
class NotSupportedError(DatabaseError): pass
class _DbType(object):
def __init__(self,valuesTuple):
self.values = valuesTuple
def __eq__(self, other): return other in self.values
def __ne__(self, other): return other not in self.values
def connect(connection_string, timeout=30):
    """Connect to a database.

    connection_string -- An ADODB formatted connection string, see:
        http://www.connectionstrings.com/?carrier=sqlserver2005
    timeout -- A command timeout value, in seconds (default 30 seconds)
    """
    try:
        # COM must be initialised on this thread before Dispatch is used.
        pythoncom.CoInitialize()
        ado_conn = win32com.client.Dispatch('ADODB.Connection')
        ado_conn.CommandTimeout = timeout
        ado_conn.ConnectionString = connection_string
        ado_conn.Open()
        return Connection(ado_conn, _use_transactions(ado_conn))
    except Exception as e:
        raise OperationalError(e, "Error opening connection: " + connection_string)
def _use_transactions(c):
"""Return True if the given ADODB.Connection supports transactions."""
for prop in c.Properties:
if prop.Name == 'Transaction DDL':
return prop.Value > 0
return False
def format_parameters(parameters, show_value=False):
    """Format a collection of ADO Command Parameters.

    Used by error reporting in _execute_command.
    """
    directions = {
        0: 'Unknown',
        1: 'Input',
        2: 'Output',
        3: 'In/Out',
        4: 'Return',
    }

    formatted = []
    for p in parameters:
        type_name = adTypeNames.get(p.Type, str(p.Type)+' (unknown type)')
        if show_value:
            formatted.append(
                "Name: %s, Dir.: %s, Type: %s, Size: %s, Value: \"%s\", Precision: %s, NumericScale: %s" %
                (p.Name, directions[p.Direction], type_name, p.Size, p.Value, p.Precision, p.NumericScale))
        else:
            formatted.append(
                "Name: %s, Dir.: %s, Type: %s, Size: %s, Precision: %s, NumericScale: %s" %
                (p.Name, directions[p.Direction], type_name, p.Size, p.Precision, p.NumericScale))

    return '[' + ', '.join(formatted) + ']'
def _configure_parameter(p, value):
    """Configure the given ADO Parameter 'p' with the Python 'value'."""
    # Only input-capable (or unknown-direction) parameters take values.
    if p.Direction not in [adParamInput, adParamInputOutput, adParamUnknown]:
        return

    if isinstance(value, basestring):
        p.Value = value
        p.Size = len(value)

    elif isinstance(value, buffer):
        # Binary data is appended in chunks rather than assigned directly.
        p.Size = len(value)
        p.AppendChunk(value)

    elif isinstance(value, decimal.Decimal):
        p.Value = value
        # Derive ADO Precision/NumericScale from the decimal's tuple form
        # (digits, exponent).
        exponent = value.as_tuple()[2]
        digit_count = len(value.as_tuple()[1])

        if exponent == 0:
            p.NumericScale = 0
            p.Precision =  digit_count
        elif exponent < 0:
            p.NumericScale = -exponent
            p.Precision = digit_count
            if p.Precision < p.NumericScale:
                p.Precision = p.NumericScale
        elif exponent > 0:
            p.NumericScale = 0
            p.Precision = digit_count + exponent

    else:
        # For any other type, set the value and let pythoncom do the right thing.
        p.Value = value

    # Use -1 instead of 0 for empty strings and buffers
    if p.Size == 0:
        p.Size = -1
class Connection(object):
    """DB-API 2.0 Connection wrapping an ADODB.Connection COM object."""

    def __init__(self, adoConn, useTransactions=False):
        # adoConn: an already-opened ADODB.Connection (see connect()).
        self.adoConn = adoConn
        # Optional user-supplied error handler; None means standardErrorHandler.
        self.errorhandler = None
        self.messages = []
        self.adoConn.CursorLocation = defaultCursorLocation
        self.supportsTransactions = useTransactions

        if self.supportsTransactions:
            self.adoConn.IsolationLevel = defaultIsolationLevel
            self.adoConn.BeginTrans() # Disables autocommit per DBAPI

    def _raiseConnectionError(self, errorclass, errorvalue):
        # Dispatch through the installed error handler (no cursor involved).
        eh = self.errorhandler
        if eh is None:
            eh = standardErrorHandler
        eh(self, None, errorclass, errorvalue)

    def _close_connection(self):
        """Close the underlying ADO Connection object, rolling back an active transaction if supported."""
        if self.supportsTransactions:
            self.adoConn.RollbackTrans()
        self.adoConn.Close()

    def close(self):
        """Close the database connection."""
        self.messages = []
        try:
            self._close_connection()
        except Exception, e:
            self._raiseConnectionError(InternalError, e)
        # Balance the CoInitialize() done in connect().
        pythoncom.CoUninitialize()

    def commit(self):
        """Commit a pending transaction to the database.

        Note that if the database supports an auto-commit feature, this must
        be initially off.
        """
        self.messages = []
        if not self.supportsTransactions:
            return

        try:
            self.adoConn.CommitTrans()

            if not(self.adoConn.Attributes & adXactCommitRetaining):
                #If attributes has adXactCommitRetaining it performs retaining commits that is,
                #calling CommitTrans automatically starts a new transaction. Not all providers support this.
                #If not, we will have to start a new transaction by this command:
                self.adoConn.BeginTrans()
        except Exception, e:
            self._raiseConnectionError(Error, e)

    def rollback(self):
        """Abort a pending transaction."""
        self.messages = []
        if not self.supportsTransactions:
            self._raiseConnectionError(NotSupportedError, None)

        self.adoConn.RollbackTrans()
        if not(self.adoConn.Attributes & adXactAbortRetaining):
            #If attributes has adXactAbortRetaining it performs retaining aborts that is,
            #calling RollbackTrans automatically starts a new transaction. Not all providers support this.
            #If not, we will have to start a new transaction by this command:
            self.adoConn.BeginTrans()

    def cursor(self):
        """Return a new Cursor object using the current connection."""
        self.messages = []
        return Cursor(self)

    def printADOerrors(self):
        """Debug helper: dump the ADO error collection to stdout."""
        print 'ADO Errors (%i):' % self.adoConn.Errors.Count
        for e in self.adoConn.Errors:
            print 'Description: %s' % e.Description
            print 'Error: %s %s ' % (e.Number, adoErrors.get(e.Number, "unknown"))
            if e.Number == ado_error_TIMEOUT:
                print 'Timeout Error: Try using adodbpi.connect(constr,timeout=Nseconds)'
            print 'Source: %s' % e.Source
            print 'NativeError: %s' % e.NativeError
            print 'SQL State: %s' % e.SQLState

    def _suggest_error_class(self):
        """Introspect the current ADO Errors and determine an appropriate error class.

        Error.SQLState is a SQL-defined error condition, per the SQL specification:
        http://www.contrib.andrew.cmu.edu/~shadow/sql/sql1992.txt

        The 23000 class of errors are integrity errors.
        Error 40002 is a transactional integrity error.
        """
        if self.adoConn is not None:
            for e in self.adoConn.Errors:
                state = str(e.SQLState)
                if state.startswith('23') or state=='40002':
                    return IntegrityError
        return DatabaseError

    def __del__(self):
        # Best-effort cleanup; a destructor must never raise.
        try:
            self._close_connection()
        except: pass
        self.adoConn = None
class Cursor(object):
    """DB-API 2.0 cursor backed by ADODB.Command/Recordset COM objects."""

    ## This read-only attribute is a sequence of 7-item sequences.
    ## Each of these sequences contains information describing one result column:
    ## (name, type_code, display_size, internal_size, precision, scale, null_ok).
    ## This attribute will be None for operations that do not return rows or if the
    ## cursor has not had an operation invoked via the executeXXX() method yet.
    ## The type_code can be interpreted by comparing it to the Type Objects specified in the section below.
    description = None

    ## This read-only attribute specifies the number of rows that the last executeXXX() produced
    ## (for DQL statements like select) or affected (for DML statements like update or insert).
    ## The attribute is -1 in case no executeXXX() has been performed on the cursor or
    ## the rowcount of the last operation is not determinable by the interface.[7]
    ## NOTE: -- adodbapi returns "-1" by default for all select statements
    rowcount = -1

    # Arraysize specifies the number of rows to fetch at a time with fetchmany().
    arraysize = 1

    def __init__(self, connection):
        """Bind a new cursor to 'connection', inheriting its error handler."""
        self.messages = []
        self.connection = connection
        # The open ADO recordset for the last executed query, if any.
        self.rs = None
        self.description = None
        self.errorhandler = connection.errorhandler

    def __iter__(self):
        # Yield rows by calling fetchone() until it returns None.
        return iter(self.fetchone, None)

    def __enter__(self):
        "Allow database cursors to be used with context managers."
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        "Allow database cursors to be used with context managers."
        self.close()

    def _raiseCursorError(self, errorclass, errorvalue):
        # Route errors through the installed handler (default records + raises).
        eh = self.errorhandler
        if eh is None:
            eh = standardErrorHandler
        eh(self.connection, self, errorclass, errorvalue)

    def _description_from_recordset(self, recordset):
        """Populate self.rs and self.description from an ADO recordset."""
        # Abort if closed or no recordset.
        if (recordset is None) or (recordset.State == adStateClosed):
            self.rs = None
            self.description = None
            return

        # Since we use a forward-only cursor, rowcount will always return -1
        self.rowcount = -1
        self.rs = recordset
        desc = list()

        for f in self.rs.Fields:
            display_size = None
            # ActualSize is only meaningful while positioned on a row.
            if not(self.rs.EOF or self.rs.BOF):
                display_size = f.ActualSize
            null_ok = bool(f.Attributes & adFldMayBeNull)

            desc.append( (f.Name, f.Type, display_size, f.DefinedSize, f.Precision, f.NumericScale, null_ok) )

        self.description = desc

    def close(self):
        """Close the cursor."""
        self.messages = []
        self.connection = None
        if self.rs and self.rs.State != adStateClosed:
            self.rs.Close()
            self.rs = None

    def _new_command(self, command_type=adCmdText):
        """Create a fresh ADODB.Command bound to this cursor's connection."""
        self.cmd = None
        self.messages = []

        if self.connection is None:
            self._raiseCursorError(Error, None)
            return

        try:
            self.cmd = win32com.client.Dispatch("ADODB.Command")
            self.cmd.ActiveConnection = self.connection.adoConn
            self.cmd.CommandTimeout = self.connection.adoConn.CommandTimeout
            self.cmd.CommandType = command_type
        except:
            self._raiseCursorError(DatabaseError, None)

    def _execute_command(self):
        """Execute self.cmd, recording rowcount and result description.

        Failures are reported through the error handler with the command
        text and formatted parameters included in the message.
        """
        # Sprocs may have an integer return value
        self.return_value = None

        try:
            # Execute() returns (recordset, records_affected).
            recordset = self.cmd.Execute()
            self.rowcount = recordset[1]
            self._description_from_recordset(recordset[0])
        except Exception, e:
            _message = ""
            if hasattr(e, 'args'): _message += str(e.args)+"\n"
            _message += "Command:\n%s\nParameters:\n%s" % (self.cmd.CommandText, format_parameters(self.cmd.Parameters, True))
            klass = self.connection._suggest_error_class()
            self._raiseCursorError(klass, _message)

    def callproc(self, procname, parameters=None):
        """Call a stored database procedure with the given name.

        The sequence of parameters must contain one entry for each
        argument that the sproc expects. The result of the
        call is returned as modified copy of the input
        sequence. Input parameters are left untouched, output and
        input/output parameters replaced with possibly new values.

        The sproc may also provide a result set as output,
        which is available through the standard .fetch*() methods.

        Extension: A "return_value" property may be set on the
        cursor if the sproc defines an integer return value.
        """
        self._new_command(adCmdStoredProc)
        self.cmd.CommandText = procname
        # Ask the provider for the sproc's parameter definitions.
        self.cmd.Parameters.Refresh()

        try:
            # Return value is 0th ADO parameter. Skip it.
            for i, p in enumerate(tuple(self.cmd.Parameters)[1:]):
                _configure_parameter(p, parameters[i])
        except:
            _message = u'Converting Parameter %s: %s, %s\n' %\
                (p.Name, ado_type_name(p.Type), repr(parameters[i]))

            self._raiseCursorError(DataError, _message)

        self._execute_command()

        p_return_value = self.cmd.Parameters(0)
        self.return_value = _convert_to_python(p_return_value.Value, p_return_value.Type)

        return [_convert_to_python(p.Value, p.Type)
            for p in tuple(self.cmd.Parameters)[1:] ]

    def execute(self, operation, parameters=None):
        """Prepare and execute a database operation (query or command).

        Return value is not defined.
        """
        self._new_command()

        if parameters is None:
            parameters = list()

        parameter_replacements = list()
        for i, value in enumerate(parameters):
            if value is None:
                # NULLs are inlined into the SQL rather than bound.
                parameter_replacements.append('NULL')
                continue

            if isinstance(value, basestring) and value == "":
                # Empty strings are inlined as '' -- presumably to avoid
                # zero-size ADO parameter issues (see _configure_parameter).
                parameter_replacements.append("''")
                continue

            # Otherwise, process the non-NULL, non-empty string parameter.
            parameter_replacements.append('?')
            try:
                p = self.cmd.CreateParameter('p%i' % i, _ado_type(value))
            except KeyError:
                _message = u'Failed to map python type "%s" to an ADO type' % (value.__class__.__name__,)
                self._raiseCursorError(DataError, _message)
            except:
                _message = u'Creating Parameter p%i, %s' % (i, _ado_type(value))
                self._raiseCursorError(DataError, _message)

            try:
                _configure_parameter(p, value)
                self.cmd.Parameters.Append(p)
            except:
                _message = u'Converting Parameter %s: %s, %s\n' %\
                    (p.Name, ado_type_name(p.Type), repr(value))

                self._raiseCursorError(DataError, _message)

        # Replace params with ? or NULL
        if parameter_replacements:
            operation = operation % tuple(parameter_replacements)

        self.cmd.CommandText = operation
        self._execute_command()

    def executemany(self, operation, seq_of_parameters):
        """Execute the given command against all parameter sequences or mappings given in seq_of_parameters."""
        self.messages = list()
        total_recordcount = 0

        for params in seq_of_parameters:
            self.execute(operation, params)

            # One indeterminate rowcount makes the total indeterminate.
            if self.rowcount == -1:
                total_recordcount = -1

            if total_recordcount != -1:
                total_recordcount += self.rowcount

        self.rowcount = total_recordcount

    def _fetch(self, rows=None):
        """Fetch rows from the current recordset.

        rows -- Number of rows to fetch, or None (default) to fetch all rows.
        """
        if self.connection is None or self.rs is None:
            self._raiseCursorError(Error, None)
            return

        if self.rs.State == adStateClosed or self.rs.BOF or self.rs.EOF:
            if rows == 1: # fetchone returns None
                return None
            else: # fetchall and fetchmany return empty lists
                return list()

        if rows:
            ado_results = self.rs.GetRows(rows)
        else:
            ado_results = self.rs.GetRows()

        # GetRows returns data column-major; convert each column's values,
        # then zip the columns back into row tuples.
        py_columns = list()
        column_types = [column_desc[1] for column_desc in self.description]
        for ado_type, column in zip(column_types, ado_results):
            py_columns.append( [_convert_to_python(cell, ado_type) for cell in column] )

        return tuple(zip(*py_columns))

    def fetchone(self):
        """Fetch the next row of a query result set, returning a single sequence, or None when no more data is available.

        An Error (or subclass) exception is raised if the previous call to executeXXX()
        did not produce any result set or no call was issued yet.
        """
        self.messages = list()
        result = self._fetch(1)
        if result: # return record (not list of records)
            return result[0]
        return None

    def fetchmany(self, size=None):
        """Fetch the next set of rows of a query result, returning a list of tuples. An empty sequence is returned when no more rows are available."""
        self.messages = list()
        if size is None:
            size = self.arraysize
        return self._fetch(size)

    def fetchall(self):
        """Fetch all remaining rows of a query result, returning them as a sequence of sequences."""
        self.messages = list()
        return self._fetch()

    def nextset(self):
        """Skip to the next available recordset, discarding any remaining rows from the current recordset.

        If there are no more sets, the method returns None. Otherwise, it returns a true
        value and subsequent calls to the fetch methods will return rows from the next result set.
        """
        self.messages = list()
        if self.connection is None or self.rs is None:
            self._raiseCursorError(Error, None)
            return None

        recordset = self.rs.NextRecordset()[0]
        if recordset is None:
            return None

        self._description_from_recordset(recordset)
        return True

    # DB-API required no-ops: parameter sizing is handled automatically.
    def setinputsizes(self, sizes): pass
    def setoutputsize(self, size, column=None): pass
# Type specific constructors as required by the DB-API 2 specification.
Date = datetime.date
Time = datetime.time
Timestamp = datetime.datetime
Binary = buffer

def DateFromTicks(ticks):
    """Construct an object holding a date value from the given # of ticks."""
    return Date(*time.localtime(ticks)[:3])

def TimeFromTicks(ticks):
    """Construct an object holding a time value from the given # of ticks."""
    return Time(*time.localtime(ticks)[3:6])

def TimestampFromTicks(ticks):
    """Construct an object holding a timestamp value from the given # of ticks."""
    return Timestamp(*time.localtime(ticks)[:6])
# Groupings of ADO type codes; used both for the value converters below
# and for the DB-API type singletons.
adoIntegerTypes = (adInteger,adSmallInt,adTinyInt,adUnsignedInt,adUnsignedSmallInt,adUnsignedTinyInt,adError)
adoRowIdTypes = (adChapter,)
adoLongTypes = (adBigInt, adUnsignedBigInt, adFileTime)
adoExactNumericTypes = (adDecimal, adNumeric, adVarNumeric, adCurrency)
adoApproximateNumericTypes = (adDouble, adSingle)
adoStringTypes = (adBSTR,adChar,adLongVarChar,adLongVarWChar,adVarChar,adVarWChar,adWChar,adGUID)
adoBinaryTypes = (adBinary, adLongVarBinary, adVarBinary)
adoDateTimeTypes = (adDBTime, adDBTimeStamp, adDate, adDBDate)

# Required DBAPI type specifiers
STRING   = _DbType(adoStringTypes)
BINARY   = _DbType(adoBinaryTypes)
NUMBER   = _DbType((adBoolean,) + adoIntegerTypes + adoLongTypes + adoExactNumericTypes + adoApproximateNumericTypes)
DATETIME = _DbType(adoDateTimeTypes)
# Not very useful for SQL Server, as normal row ids are usually just integers.
ROWID    = _DbType(adoRowIdTypes)
# Mapping ADO data types to Python objects.
def _convert_to_python(variant, adType):
    """Convert a fetched ADO 'variant' to Python via its type's converter."""
    if variant is None:
        return None
    # Unlisted types pass through unchanged (MultiMap default).
    return _variantConversions[adType](variant)
def _cvtDecimal(variant):
    # Locale-tolerant conversion to decimal.Decimal.
    return _convertNumberWithCulture(variant, decimal.Decimal)
def _cvtFloat(variant):
    # Locale-tolerant conversion to float.
    return _convertNumberWithCulture(variant, float)
def _convertNumberWithCulture(variant, f):
try:
return f(variant)
except (ValueError,TypeError,decimal.InvalidOperation):
try:
europeVsUS = str(variant).replace(",",".")
return f(europeVsUS)
except (ValueError,TypeError): pass
def _cvtComDate(comDate):
    """Convert a COM date (float days since the 1899 epoch) to a datetime."""
    as_float = float(comDate)
    whole_days = int(as_float)
    # The fractional part encodes the time of day.
    day_fraction = abs(as_float - whole_days)
    base_day = datetime.datetime.fromordinal(whole_days + _ordinal_1899_12_31)
    return base_day + datetime.timedelta(milliseconds=day_fraction * _milliseconds_per_day)
# Maps each ADO type code to the converter applied to fetched values;
# types not listed pass through unchanged via the identity default.
_variantConversions = MultiMap(
    {
        adoDateTimeTypes : _cvtComDate,
        adoExactNumericTypes: _cvtDecimal,
        adoApproximateNumericTypes: _cvtFloat,
        (adBoolean,): bool,
        adoLongTypes+adoRowIdTypes : long,
        adoIntegerTypes: int,
        adoBinaryTypes: buffer,
    },
    lambda x: x)
# Mapping Python data types to ADO type codes
def _ado_type(data):
    """Return the ADO type code to use when binding the Python value 'data'.

    Raises KeyError for unmapped types (reported by execute()).
    """
    # All strings (str and unicode) are sent as BSTR.
    if isinstance(data, basestring):
        return adBSTR
    return _map_to_adotype[type(data)]
# Exact-type lookup used by _ado_type for non-string values.
# Note that date, datetime and time all bind as adDate.
_map_to_adotype = {
    buffer: adBinary,
    float: adDouble,
    int: adInteger,
    long: adBigInt,
    bool: adBoolean,
    decimal.Decimal: adDecimal,
    datetime.date: adDate,
    datetime.datetime: adDate,
    datetime.time: adDate,
}
| Python |
from django.db.backends import BaseDatabaseOperations
import datetime
import time
class DatabaseOperations(BaseDatabaseOperations):
    """SQL-generation hooks specific to Microsoft SQL Server (2005)."""
    compiler_module = "sqlserver_ado.compiler"

    def date_extract_sql(self, lookup_type, field_name):
        """Return SQL extracting a date part (year/month/day/...) via DATEPART."""
        return "DATEPART(%s, %s)" % (lookup_type, self.quote_name(field_name))

    def date_trunc_sql(self, lookup_type, field_name):
        """Return SQL truncating a datetime to year, month or day.

        NOTE(review): falls through (returns None) for any other
        lookup_type -- confirm callers only pass year/month/day.
        """
        quoted_field_name = self.quote_name(field_name)

        if lookup_type == 'year':
            return "Convert(datetime, Convert(varchar, DATEPART(year, %s)) + '/01/01')" % quoted_field_name
        if lookup_type == 'month':
            return "Convert(datetime, Convert(varchar, DATEPART(year, %s)) + '/' + Convert(varchar, DATEPART(month, %s)) + '/01')" %\
                (quoted_field_name, quoted_field_name)
        if lookup_type == 'day':
            return "Convert(datetime, Convert(varchar(12), %s))" % quoted_field_name

    def last_insert_id(self, cursor, table_name, pk_name):
        """Return the most recent IDENTITY value generated for table_name.

        NOTE(review): the bracket-quoted name is passed as the bound
        parameter to IDENT_CURRENT -- verify the quoted form resolves,
        since IDENT_CURRENT takes a table name string.
        """
        cursor.execute("SELECT CAST(IDENT_CURRENT(%s) as bigint)", [self.quote_name(table_name)])
        return cursor.fetchone()[0]

    def no_limit_value(self):
        # No sentinel "no limit" value is needed; slicing is rewritten by
        # the compiler (TOP / ROW_NUMBER).
        return None

    def prep_for_like_query(self, x):
        """Prepares a value for use in a LIKE query."""
        # Escape the LIKE wildcards (%, _), T-SQL bracket ranges ([, ])
        # and backslashes.
        from django.utils.encoding import smart_unicode
        return (
            smart_unicode(x).\
                replace("\\", "\\\\").\
                replace("%", "\%").\
                replace("_", "\_").\
                replace("[", "\[").\
                replace("]", "\]")
        )

    def quote_name(self, name):
        """Quote an identifier with square brackets, unless already quoted."""
        if name.startswith('[') and name.endswith(']'):
            return name # already quoted
        return '[%s]' % name

    def random_function_sql(self):
        # SQL expression yielding a random float.
        return 'RAND()'

    def regex_lookup(self, lookup_type):
        """Return SQL for regex/iregex lookups.

        NOTE(review): relies on a dbo.REGEXP_LIKE user-defined function
        existing in the database -- confirm it is installed.
        """
        # Case sensitivity
        match_option = {'iregex':0, 'regex':1}[lookup_type]
        return "dbo.REGEXP_LIKE(%%s, %%s, %s)=1" % (match_option,)

    def sql_flush(self, style, tables, sequences):
        """
        Returns a list of SQL statements required to remove all data from
        the given database tables (without actually removing the tables
        themselves).

        The `style` argument is a Style object as returned by either
        color_style() or no_style() in django.core.management.color.

        Originally taken from django-pyodbc project.
        """
        if not tables:
            return list()

        qn = self.quote_name

        # Cannot use TRUNCATE on tables that are referenced by a FOREIGN KEY; use DELETE instead.
        # (which is slow)
        from django.db import connection
        cursor = connection.cursor()

        # Try to minimize the risks of the braindeaded inconsistency in
        # DBCC CHECKIDENT(table, RESEED, n) behavior.
        seqs = []
        for seq in sequences:
            cursor.execute("SELECT COUNT(*) FROM %s" % qn(seq["table"]))
            rowcnt = cursor.fetchone()[0]
            elem = dict()

            # The reseed value differs depending on whether the table has rows.
            if rowcnt:
                elem['start_id'] = 0
            else:
                elem['start_id'] = 1

            elem.update(seq)
            seqs.append(elem)

        cursor.execute("SELECT TABLE_NAME, CONSTRAINT_NAME FROM INFORMATION_SCHEMA.TABLE_CONSTRAINTS WHERE CONSTRAINT_TYPE IN ('CHECK', 'FOREIGN KEY')")
        fks = cursor.fetchall()

        sql_list = list()

        # Turn off constraints.
        sql_list.extend(['ALTER TABLE %s NOCHECK CONSTRAINT %s;' % (
            qn(fk[0]), qn(fk[1])) for fk in fks if fk[0] is not None and fk[1] is not None])

        # Delete data from tables.
        sql_list.extend(['%s %s %s;' % (
            style.SQL_KEYWORD('DELETE'),
            style.SQL_KEYWORD('FROM'),
            style.SQL_FIELD(qn(t))
            ) for t in tables])

        # Reset the counters on each table.
        sql_list.extend(['%s %s (%s, %s, %s) %s %s;' % (
            style.SQL_KEYWORD('DBCC'),
            style.SQL_KEYWORD('CHECKIDENT'),
            style.SQL_FIELD(qn(seq["table"])),
            style.SQL_KEYWORD('RESEED'),
            style.SQL_FIELD('%d' % seq['start_id']),
            style.SQL_KEYWORD('WITH'),
            style.SQL_KEYWORD('NO_INFOMSGS'),
            ) for seq in seqs])

        # Turn constraints back on.
        sql_list.extend(['ALTER TABLE %s CHECK CONSTRAINT %s;' % (
            qn(fk[0]), qn(fk[1])) for fk in fks if fk[0] is not None and fk[1] is not None])

        return sql_list

    def tablespace_sql(self, tablespace, inline=False):
        """Return the SQL fragment placing an object on the given filegroup."""
        return "ON %s" % self.quote_name(tablespace)

    def value_to_db_datetime(self, value):
        """Normalize a datetime for storage: naive only, no microseconds."""
        if value is None:
            return None

        if value.tzinfo is not None:
            raise ValueError("SQL Server 2005 does not support timezone-aware datetimes.")

        # SQL Server 2005 doesn't support microseconds
        return value.replace(microsecond=0)

    def value_to_db_time(self, value):
        # MS SQL 2005 doesn't support microseconds
        #...but it also doesn't really suport bare times
        if value is None:
            return None
        return value.replace(microsecond=0)

    def value_to_db_decimal(self, value, max_digits, decimal_places):
        """Pass decimals through unchanged; empty values become NULL."""
        if value is None or value == '':
            return None
        return value # Should be a decimal type (or string)

    def year_lookup_bounds(self, value):
        """
        Returns a two-elements list with the lower and upper bound to be used
        with a BETWEEN operator to query a field value using a year lookup

        `value` is an int, containing the looked-up year.
        """
        first = datetime.datetime(value, 1, 1)
        second = datetime.datetime(value, 12, 31, 23, 59, 59, 999)
        return [first, second]
| Python |
"""Microsoft SQL Server database backend for Django."""
from django.db.backends import BaseDatabaseWrapper, BaseDatabaseFeatures, BaseDatabaseValidation, BaseDatabaseClient
from django.db.backends.signals import connection_created
from django.core.exceptions import ImproperlyConfigured
import dbapi as Database
from introspection import DatabaseIntrospection
from creation import DatabaseCreation
from operations import DatabaseOperations
# Re-export the DB-API exception types at module level for Django's use.
DatabaseError = Database.DatabaseError
IntegrityError = Database.IntegrityError
class DatabaseFeatures(BaseDatabaseFeatures):
    # Tell Django this backend supplies its own Query subclass.
    # NOTE(review): the custom Query class itself is not visible in this
    # module -- presumably provided by the dbapi/operations modules.
    uses_custom_query_class = True
# IP Address recognizer taken from:
# http://mail.python.org/pipermail/python-list/2006-March/375505.html
def _looks_like_ipaddress(address):
dots = address.split(".")
if len(dots) != 4:
return False
for item in dots:
if not 0 <= int(item) <= 255:
return False
return True
def connection_string_from_settings():
    """Build an ADO connection string from the active Django settings."""
    from django.conf import settings as django_settings
    return make_connection_string(django_settings)
def make_connection_string(settings):
    """Build an ADO/OLE DB connection string from a Django settings
    object or a plain mapping.

    Each value is looked up first as ``<name>`` and then as the
    old-style ``DATABASE_<name>``.  Raises ImproperlyConfigured when the
    database name is missing, or when a port is given with a non-IP host.
    """
    class wrap(object):
        """Adapt a dict or a settings module to attribute access."""
        def __init__(self, mapping):
            self._dict = mapping

        def __getattr__(self, name):
            d = self._dict
            result = None
            if hasattr(d, "get"):
                # Mapping: prefer the bare key, fall back to the
                # old-style DATABASE_* key.  (`in` replaces the
                # Python-2-only dict.has_key().)
                if name in d:
                    result = d.get(name)
                else:
                    result = d.get('DATABASE_' + name)
            elif hasattr(d, 'DATABASE_' + name):
                result = getattr(d, 'DATABASE_' + name)
            else:
                result = getattr(d, name)
            return result

    settings = wrap(settings)
    db_name = settings.NAME.strip()
    db_host = settings.HOST or '127.0.0.1'
    if len(db_name) == 0:
        raise ImproperlyConfigured("You need to specify a DATABASE NAME in your Django settings file.")

    # Connection strings courtesy of:
    # http://www.connectionstrings.com/?carrier=sqlserver

    # If a port is given, force a TCP/IP connection. The host should be an
    # IP address in this case.  Truthiness check (instead of ``!= ''``)
    # also handles a missing key, which yields None.
    datasource = db_host
    if settings.PORT:
        if not _looks_like_ipaddress(db_host):
            raise ImproperlyConfigured("When using DATABASE PORT, DATABASE HOST must be an IP address.")
        try:
            port = int(settings.PORT)
        except ValueError:
            raise ImproperlyConfigured("DATABASE PORT must be a number.")
        # Bug fix: this TCP/IP data source was previously computed but
        # never used, so DATABASE_PORT was silently ignored.
        datasource = '%s,%i;Network Library=DBMSSOCN' % (db_host, port)

    # If no user is specified, use integrated security.
    if settings.USER:
        auth_string = "UID=%s;PWD=%s" % (settings.USER, settings.PASSWORD)
    else:
        auth_string = "Integrated Security=SSPI"

    parts = [
        "PROVIDER=SQLOLEDB",
        "DATA SOURCE=%s" % (datasource,),
        "Initial Catalog=%s" % (db_name,),
        auth_string
    ]

    options = settings.OPTIONS
    if options:
        if 'use_mars' in options and options['use_mars']:
            parts.append("MultipleActiveResultSets=true")
        if 'extra_params' in options:
            parts.append(options['extra_params'])
        if 'provider' in options:
            parts[0] = 'PROVIDER=%s' % (options['provider'],)

    return ";".join(parts)
class DatabaseWrapper(BaseDatabaseWrapper):
    """Django database wrapper for SQL Server via ADO."""

    # Django lookup type -> SQL operator template.
    operators = {
        "exact": "= %s",
        "iexact": "LIKE %s ESCAPE '\\'",
        "contains": "LIKE %s ESCAPE '\\'",
        "icontains": "LIKE %s ESCAPE '\\'",
        "gt": "> %s",
        "gte": ">= %s",
        "lt": "< %s",
        "lte": "<= %s",
        "startswith": "LIKE %s ESCAPE '\\'",
        "endswith": "LIKE %s ESCAPE '\\'",
        "istartswith": "LIKE %s ESCAPE '\\'",
        "iendswith": "LIKE %s ESCAPE '\\'",
    }

    def __init__(self, *args, **kwargs):
        super(DatabaseWrapper, self).__init__(*args, **kwargs)
        # DatabaseFeatures grew a required argument in Django 1.3; probe
        # for the newer signature by catching TypeError.
        try:
            self.features = DatabaseFeatures()       # django < 1.3
        except TypeError:
            self.features = DatabaseFeatures(self)   # django >= 1.3
        self.ops = DatabaseOperations()
        self.client = BaseDatabaseClient(self)
        self.creation = DatabaseCreation(self)
        self.introspection = DatabaseIntrospection(self)
        self.validation = BaseDatabaseValidation(self)
        # Per-command timeout in seconds; fall back to 30 on bad config.
        raw_timeout = self.settings_dict.get('COMMAND_TIMEOUT', 30)
        try:
            self.command_timeout = int(raw_timeout)
        except ValueError:
            self.command_timeout = 30

    def _cursor(self):
        """Return a new cursor, connecting lazily on first use."""
        if self.connection is None:
            self.connection = Database.connect(
                make_connection_string(self.settings_dict),
                self.command_timeout,
            )
            # Announce the fresh connection to Django's signal listeners.
            connection_created.send(sender=self.__class__)
        return Database.Cursor(self.connection)
| Python |
"""This module provides SQL Server specific fields for Django models."""
from django.db.models import AutoField, ForeignKey, BigIntegerField
from django.forms import ValidationError
from django.utils.translation import ugettext_lazy as _
__all__ = (
'BigAutoField',
'BigForeignKey',
'BigIntegerField',
)
class BigAutoField(AutoField):
    """A bigint IDENTITY field (64-bit auto-incrementing key).

    Python 2 module: values are normalised to ``long``.
    """

    def get_internal_type(self):
        return "BigAutoField"

    def to_python(self, value):
        """Coerce ``value`` to a long, passing None through unchanged."""
        if value is None:
            return value
        try:
            return long(value)
        except (TypeError, ValueError):
            raise ValidationError(
                _("This value must be a long."))

    def get_db_prep_value(self, value, connection=None, prepared=False):
        # NULL stays NULL; everything else goes to the driver as a long.
        return None if value is None else long(value)
class BigForeignKey(ForeignKey):
    """A ForeignKey field that points to a BigAutoField or BigIntegerField"""

    def db_type(self, connection=None):
        # Delegate to BigIntegerField so the FK column type matches its
        # target; on older Django, db_type() takes no connection argument.
        target_type = BigIntegerField()
        try:
            return target_type.db_type(connection=connection)
        except AttributeError:
            return target_type.db_type()
| Python |
# ADO enumerated constants documented on MSDN:
# http://msdn.microsoft.com/en-us/library/ms678353(VS.85).aspx

# IsolationLevelEnum
# NOTE: several names deliberately share values (visible below):
# CursorStability == ReadCommitted, Browse == ReadUncommitted,
# Isolated == Serializable.
adXactUnspecified = -1
adXactBrowse = 0x100
adXactChaos = 0x10
adXactCursorStability = 0x1000
adXactIsolated = 0x100000
adXactReadCommitted = 0x1000
adXactReadUncommitted = 0x100
adXactRepeatableRead = 0x10000
adXactSerializable = 0x100000

# CursorLocationEnum
adUseClient = 3
adUseServer = 2

# CursorTypeEnum
adOpenDynamic = 2
adOpenForwardOnly = 0
adOpenKeyset = 1
adOpenStatic = 3
adOpenUnspecified = -1

# CommandTypeEnum
adCmdText = 1
adCmdStoredProc = 4

# ParameterDirectionEnum
adParamInput = 1
adParamInputOutput = 3
adParamOutput = 2
adParamReturnValue = 4
adParamUnknown = 0

# ObjectStateEnum
adStateClosed = 0
adStateOpen = 1
adStateConnecting = 2
adStateExecuting = 4
adStateFetching = 8

# FieldAttributeEnum
adFldMayBeNull = 0x40

# ConnectModeEnum
adModeUnknown = 0
adModeRead = 1
adModeWrite = 2
adModeReadWrite = 3
adModeShareDenyRead = 4
adModeShareDenyWrite = 8
adModeShareExclusive = 12
adModeShareDenyNone = 16
adModeRecursive = 0x400000

# XactAttributeEnum
adXactCommitRetaining = 131072
adXactAbortRetaining = 262144

# Provider error code signalling a command timeout.
ado_error_TIMEOUT = -2147217871
# DataTypeEnum - ADO Data types documented at:
# http://msdn2.microsoft.com/en-us/library/ms675318.aspx
adArray = 0x2000
adEmpty = 0x0
adBSTR = 0x8
adBigInt = 0x14
adBinary = 0x80
adBoolean = 0xb
adChapter = 0x88
adChar = 0x81
adCurrency = 0x6
adDBDate = 0x85
adDBTime = 0x86
adDBTimeStamp = 0x87
adDate = 0x7
adDecimal = 0xe
adDouble = 0x5
adError = 0xa
adFileTime = 0x40
adGUID = 0x48
adIDispatch = 0x9
adIUnknown = 0xd
adInteger = 0x3
adLongVarBinary = 0xcd
adLongVarChar = 0xc9
adLongVarWChar = 0xcb
adNumeric = 0x83
adPropVariant = 0x8a
adSingle = 0x4
adSmallInt = 0x2
adTinyInt = 0x10
adUnsignedBigInt = 0x15
adUnsignedInt = 0x13
adUnsignedSmallInt = 0x12
adUnsignedTinyInt = 0x11
adUserDefined = 0x84
adVarBinary = 0xCC
adVarChar = 0xC8
adVarNumeric = 0x8B
adVarWChar = 0xCA
adVariant = 0xC
adWChar = 0x82

# Additional constants used by introspection but not ADO itself
# (sentinel type code marking an IDENTITY column as an AutoField).
AUTO_FIELD_MARKER = -1000
# Map from ADO data-type code to its symbolic constant name, used for
# debugging output and readable error messages.
adTypeNames = {
    adBSTR: 'adBSTR',
    adBigInt: 'adBigInt',
    adBinary: 'adBinary',
    adBoolean: 'adBoolean',
    adChapter: 'adChapter',
    adChar: 'adChar',
    adCurrency: 'adCurrency',
    adDBDate: 'adDBDate',
    adDBTime: 'adDBTime',
    adDBTimeStamp: 'adDBTimeStamp',
    adDate: 'adDate',
    adDecimal: 'adDecimal',
    adDouble: 'adDouble',
    adEmpty: 'adEmpty',
    adError: 'adError',
    adFileTime: 'adFileTime',
    adGUID: 'adGUID',
    adIDispatch: 'adIDispatch',
    adIUnknown: 'adIUnknown',
    adInteger: 'adInteger',
    adLongVarBinary: 'adLongVarBinary',
    adLongVarChar: 'adLongVarChar',
    adLongVarWChar: 'adLongVarWChar',
    adNumeric: 'adNumeric',
    adPropVariant: 'adPropVariant',
    adSingle: 'adSingle',
    adSmallInt: 'adSmallInt',
    adTinyInt: 'adTinyInt',
    adUnsignedBigInt: 'adUnsignedBigInt',
    adUnsignedInt: 'adUnsignedInt',
    adUnsignedSmallInt: 'adUnsignedSmallInt',
    adUnsignedTinyInt: 'adUnsignedTinyInt',
    adUserDefined: 'adUserDefined',
    adVarBinary: 'adVarBinary',
    adVarChar: 'adVarChar',
    adVarNumeric: 'adVarNumeric',
    adVarWChar: 'adVarWChar',
    adVariant: 'adVariant',
    adWChar: 'adWChar',
}
def ado_type_name(ado_type):
    """Return the symbolic ADO name for a data-type code, or a readable
    placeholder for unknown codes."""
    try:
        return adTypeNames[ado_type]
    except KeyError:
        return 'unknown type (' + str(ado_type) + ')'
# Error codes to names
# Map from ADO error number to its symbolic ErrorValueEnum name.
adoErrors= {
    0xe7b :'adErrBoundToCommand',
    0xe94 :'adErrCannotComplete',
    0xea4 :'adErrCantChangeConnection',
    0xc94 :'adErrCantChangeProvider',
    0xe8c :'adErrCantConvertvalue',
    0xe8d :'adErrCantCreate',
    0xea3 :'adErrCatalogNotSet',
    0xe8e :'adErrColumnNotOnThisRow',
    0xd5d :'adErrDataConversion',
    0xe89 :'adErrDataOverflow',
    0xe9a :'adErrDelResOutOfScope',
    0xea6 :'adErrDenyNotSupported',
    0xea7 :'adErrDenyTypeNotSupported',
    0xcb3 :'adErrFeatureNotAvailable',
    0xea5 :'adErrFieldsUpdateFailed',
    0xc93 :'adErrIllegalOperation',
    0xcae :'adErrInTransaction',
    0xe87 :'adErrIntegrityViolation',
    0xbb9 :'adErrInvalidArgument',
    0xe7d :'adErrInvalidConnection',
    0xe7c :'adErrInvalidParamInfo',
    0xe82 :'adErrInvalidTransaction',
    0xe91 :'adErrInvalidURL',
    0xcc1 :'adErrItemNotFound',
    0xbcd :'adErrNoCurrentRecord',
    0xe83 :'adErrNotExecuting',
    0xe7e :'adErrNotReentrant',
    0xe78 :'adErrObjectClosed',
    0xd27 :'adErrObjectInCollection',
    0xd5c :'adErrObjectNotSet',
    0xe79 :'adErrObjectOpen',
    0xbba :'adErrOpeningFile',
    0xe80 :'adErrOperationCancelled',
    0xe96 :'adErrOutOfSpace',
    0xe88 :'adErrPermissionDenied',
    0xe9e :'adErrPropConflicting',
    0xe9b :'adErrPropInvalidColumn',
    0xe9c :'adErrPropInvalidOption',
    0xe9d :'adErrPropInvalidValue',
    0xe9f :'adErrPropNotAllSettable',
    0xea0 :'adErrPropNotSet',
    0xea1 :'adErrPropNotSettable',
    0xea2 :'adErrPropNotSupported',
    0xbb8 :'adErrProviderFailed',
    0xe7a :'adErrProviderNotFound',
    0xbbb :'adErrReadFile',
    0xe93 :'adErrResourceExists',
    0xe92 :'adErrResourceLocked',
    0xe97 :'adErrResourceOutOfScope',
    0xe8a :'adErrSchemaViolation',
    0xe8b :'adErrSignMismatch',
    0xe81 :'adErrStillConnecting',
    0xe7f :'adErrStillExecuting',
    0xe90 :'adErrTreePermissionDenied',
    0xe8f :'adErrURLDoesNotExist',
    0xe99 :'adErrURLNamedRowDoesNotExist',
    0xe98 :'adErrUnavailable',
    0xe84 :'adErrUnsafeOperation',
    0xe95 :'adErrVolumeNotFound',
    0xbbc :'adErrWriteFile'
}
| Python |
from django.db.backends import BaseDatabaseIntrospection
import ado_consts
class DatabaseIntrospection(BaseDatabaseIntrospection):
    """Schema introspection for SQL Server (drives ``inspectdb``)."""

    def get_table_list(self, cursor):
        "Return a list of table and view names in the current database."
        cursor.execute("SELECT TABLE_NAME FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_TYPE = 'BASE TABLE' UNION SELECT TABLE_NAME FROM INFORMATION_SCHEMA.VIEWS")
        return [row[0] for row in cursor.fetchall()]

    def _is_auto_field(self, cursor, table_name, column_name):
        """Check if a column is an identity column.

        See: http://msdn2.microsoft.com/en-us/library/ms174968.aspx
        """
        # NOTE(review): names are interpolated directly into the SQL;
        # acceptable only because they come from introspecting the
        # database itself, not from user input.
        sql = "SELECT COLUMNPROPERTY(OBJECT_ID(N'%s'), N'%s', 'IsIdentity')" % \
            (table_name, column_name)
        cursor.execute(sql)
        return cursor.fetchone()[0]

    def get_table_description(self, cursor, table_name, identity_check=True):
        """Return a description of the table, with DB-API cursor.description interface.

        If ``identity_check`` is True, each of the table's fields is checked
        for the IDENTITY property (the MSSQL equivalent of an AutoField);
        such columns get the custom type code AUTO_FIELD_MARKER, which maps
        to 'AutoField' in data_types_reverse.
        """
        # Always-false WHERE clause: fetches no rows, but populates
        # cursor.description with the column metadata we need.
        cursor.execute("SELECT * FROM [%s] where 1=0" % (table_name))
        columns = cursor.description
        items = list()
        for column in columns:
            column = list(column) # Convert tuple to list
            if identity_check and self._is_auto_field(cursor, table_name, column[0]):
                # Substitute the sentinel type code for IDENTITY columns.
                column[1] = ado_consts.AUTO_FIELD_MARKER
            items.append(column)
        return items

    def _name_to_index(self, cursor, table_name):
        """Return a dictionary of {field_name: field_index} for the given table.
        Indexes are 0-based.
        """
        return dict([(d[0], i) for i, d in enumerate(self.get_table_description(cursor, table_name, False))])

    def get_relations(self, cursor, table_name):
        """Return {source_index: (target_index, target_table)} for each
        foreign key of ``table_name``."""
        source_field_dict = self._name_to_index(cursor, table_name)
        # Resolve every FK constraint to (fk column, referenced table,
        # referenced column) via INFORMATION_SCHEMA.
        sql = """
select
    column_name = fk_cols.column_name,
    referenced_table_name = pk.table_name,
    referenced_column_name = pk_cols.column_name
from information_schema.referential_constraints ref_const
join information_schema.table_constraints fk
    on ref_const.constraint_catalog = fk.constraint_catalog
    and ref_const.constraint_schema = fk.constraint_schema
    and ref_const.constraint_name = fk.constraint_name
    and fk.constraint_type = 'foreign key'
join information_schema.table_constraints pk
    on ref_const.unique_constraint_catalog = pk.constraint_catalog
    and ref_const.unique_constraint_schema = pk.constraint_schema
    and ref_const.unique_constraint_name = pk.constraint_name
    and pk.constraint_type = 'primary key'
join information_schema.key_column_usage fk_cols
    on ref_const.constraint_name = fk_cols.constraint_name
join information_schema.key_column_usage pk_cols
    on pk.constraint_name = pk_cols.constraint_name
where
    fk.table_name = %s"""
        cursor.execute(sql,[table_name])
        relations = cursor.fetchall()
        relation_map = dict()
        for source_column, target_table, target_column in relations:
            target_field_dict = self._name_to_index(cursor, target_table)
            target_index = target_field_dict[target_column]
            source_index = source_field_dict[source_column]
            relation_map[source_index] = (target_index, target_table)
        return relation_map

    def get_indexes(self, cursor, table_name):
        # Returns a dictionary of fieldname -> infodict for the given table,
        # where each infodict is in the format:
        #     {'primary_key': boolean representing whether it's the primary key,
        #      'unique': boolean representing whether it's a unique index}
        # Only single-column indexes are reported; the NOT EXISTS subquery
        # filters out composite keys.
        sql = """
select
    C.name as [column_name],
    IX.is_unique as [unique],
    IX.is_primary_key as [primary_key]
from
    sys.tables T
    join sys.index_columns IC on IC.object_id = T.object_id
    join sys.columns C on C.object_id = T.object_id and C.column_id = IC.column_id
    join sys.indexes Ix on Ix.object_id = T.object_id and Ix.index_id = IC.index_id
where
    T.name = %s
    and (Ix.is_unique=1 or Ix.is_primary_key=1)
    -- Omit multi-column keys
    and not exists (
        select *
        from sys.index_columns cols
        where
            cols.object_id = T.object_id
            and cols.index_id = IC.index_id
            and cols.key_ordinal > 1
    )
"""
        cursor.execute(sql,[table_name])
        constraints = cursor.fetchall()
        indexes = dict()
        for column_name, unique, primary_key in constraints:
            indexes[column_name.lower()] = {"primary_key":primary_key, "unique":unique}
        return indexes
# Reverse map: ADO column type code -> Django model field class name,
# consumed by inspectdb via get_table_description().
data_types_reverse = {
    ado_consts.AUTO_FIELD_MARKER: 'AutoField',
    ado_consts.adBoolean: 'BooleanField',
    ado_consts.adChar: 'CharField',
    ado_consts.adWChar: 'CharField',
    ado_consts.adDecimal: 'DecimalField',
    ado_consts.adNumeric: 'DecimalField',
    ado_consts.adDBTimeStamp: 'DateTimeField',
    ado_consts.adDouble: 'FloatField',
    ado_consts.adSingle: 'FloatField',
    ado_consts.adInteger: 'IntegerField',
    ado_consts.adBigInt: 'IntegerField',
    #ado_consts.adBigInt: 'BigIntegerField',
    ado_consts.adSmallInt: 'IntegerField',
    ado_consts.adTinyInt: 'IntegerField',
    ado_consts.adVarChar: 'CharField',
    ado_consts.adVarWChar: 'CharField',
    ado_consts.adLongVarWChar: 'TextField',
    ado_consts.adLongVarChar: 'TextField',
}
| Python |
from django.db import connection
from django.db.models.fields import *
from south.db import generic
class DatabaseOperations(generic.DatabaseOperations):
    """
    django-mssql (sql_server.mssql) implementation of database operations.
    """
    add_column_string = 'ALTER TABLE %s ADD %s;'
    alter_string_set_type = 'ALTER COLUMN %(column)s %(type)s'
    allows_combined_alters = False
    delete_column_string = 'ALTER TABLE %s DROP COLUMN %s;'

    def callproc(self, procname, params=None):
        """Call a stored procedure with the given parameter values"""
        connection.cursor().callproc(procname, params)

    def create_table(self, table_name, fields):
        # SQL Server stores booleans as BIT, so map Python True/False
        # defaults onto 1/0 before delegating to the generic code.
        for _name, field in fields:
            if not isinstance(field, BooleanField):
                continue
            if field.default == True:
                field.default = 1
            if field.default == False:
                field.default = 0
        generic.DatabaseOperations.create_table(self, table_name, fields)

    def rename_column(self, table_name, old, new):
        """
        Renames the column 'old' from the table 'table_name' to 'new'.
        """
        # intentionally not quoting names
        self.callproc('sp_rename', (table_name + '.' + old, new, 'COLUMN'))
| Python |
import os.path
# Package version; the trailing 'dev' flags a development release.
VERSION = (1, 0, 0, 'dev')

def get_version():
    """
    Return the version as a string. If this is flagged as a development
    release and mercurial can be loaded the specifics about the changeset
    will be appended to the version string.
    """
    # Bug fix: ``v`` was previously only assigned inside the 'dev'
    # branch, so a release VERSION raised NameError on return.
    v = VERSION
    if 'dev' in VERSION:
        try:
            from mercurial import hg, ui
            repo_path = os.path.join(os.path.dirname(__file__), '..')
            repo = hg.repository(ui.ui(), repo_path)
            ctx = repo['tip']
            build_info = 'dev %s %s:%s' % (ctx.branch(), ctx.rev(), str(ctx))
        except Exception:
            # Narrowed from a bare ``except:`` so KeyboardInterrupt and
            # SystemExit are no longer swallowed.  Covers both a missing
            # mercurial module and an unreadable repository.
            build_info = 'dev-unknown'
        v = VERSION[:VERSION.index('dev')] + (build_info,)
    return '.'.join(map(str, v))
| Python |
from django.core.management.base import NoArgsCommand
class Command(NoArgsCommand):
    """Management command that replaces this process with the SQL Server
    GUI client, pointed at the configured database."""
    help = "Launches SQL Server Management Studio (on Windows)."
    requires_model_validation = False

    def handle_noargs(self, **options):
        from django.conf import settings
        import os
        # Bug fix: the exec* family expects argv[0] to be the program
        # name; previously '-nosplash' was consumed as argv[0] by the
        # launched process, silently dropping the first real flag.
        args = ['sqlwb.exe', '-nosplash', '-E']
        # Old-style (pre multi-db) settings access, optionally overridden
        # via DATABASE_OPTIONS.
        host = settings.DATABASE_OPTIONS.get('host', settings.DATABASE_HOST)
        db = settings.DATABASE_OPTIONS.get('db', settings.DATABASE_NAME)
        # user = settings.DATABASE_OPTIONS.get('user', settings.DATABASE_USER)
        # passwd = settings.DATABASE_OPTIONS.get('passwd', settings.DATABASE_PASSWORD)
        # port = settings.DATABASE_OPTIONS.get('port', settings.DATABASE_PORT)
        if host:
            args += ['-S', host]
        if db:
            args += ["-d", db]
        # execvp does not return on success.
        os.execvp('sqlwb.exe', args)
| Python |
import util
import types
from xml.sax.handler import *
# Token type tags: produced by ColladaParseHandler.token() from SAX
# events and consumed by the generator-based parser state machine below.
StartDocument = 0
EndDocument = 1
StartElement = 2
EndElement = 3
IgnorableWhitespace = 4
Characters = 5
def storeInDict(target, key, value):
    """Storer callback: put ``value`` under ``key`` in the dict ``target``."""
    target.update({key: value})
def storeInExistingAttribute(target, key, value, converter=None):
    """Storer callback: set attribute ``key`` on ``target`` only if the
    attribute already exists, optionally converting ``value`` first."""
    if not hasattr(target, key):
        return
    setattr(target, key, converter(value) if converter else value)
def storeInAttribute(target, key, value):
    """Storer callback: unconditionally set attribute ``key`` on ``target``."""
    setattr(target, key, value)
def readAttributes(target, attributes):
    """Copy every SAX attribute of an element onto ``target`` as a plain
    Python attribute."""
    for name in attributes.getNames():
        setattr(target, name, attributes[name])
def stripWhitespace(text):
    """Collapse a multi-line chunk of XML text: strip each line and join
    the lines with single spaces (an empty line still contributes one
    separator, so blank lines yield a double space)."""
    stripped = []
    for raw_line in text.replace('\r', '').split('\n'):
        stripped.append(raw_line.strip())
    return ' '.join(stripped).strip()
class Return(object):
    """Sentinel yielded by a handler generator to hand ``value`` back to
    the generator that delegated to it."""

    def __init__(self, value=None):
        self.value = value
class ParseState(object):
    """Shared state for the generator-driven COLLADA parser.

    SAX events are turned into token tuples (see ColladaParseHandler);
    a stack of handler generators consumes them.  ``current`` is the
    active generator, ``stack`` holds its suspended callers, and
    ``result`` carries a value handed back (via Return) to the resuming
    caller.  (Python 2 module: note ``types.ListType`` below.)
    """
    def __init__(self, output):
        self.token = None    # most recent SAX token tuple
        self.output = output # destination document being built
        self.current = None  # active handler generator
        self.result = None   # pending Return value for the caller
        self.stack = []      # suspended handler generators

    def fillDictionary(self, target, storer = storeInDict):
        """Generator: read the child elements of the current element into
        ``target`` until the enclosing end tag.  Each child element name
        becomes a key and its character data the value; ``storer``
        decides how each pair lands on ``target``."""
        depth = 0
        key = None
        buffer = None
        while True:
            yield
            type = self.token[0]
            if type == StartElement:
                depth += 1
                key = self.token[1]
                buffer = ""
            elif type == EndElement:
                if key != None:
                    storer(target, key, buffer)
                    key = None
                    buffer = None
                depth -= 1
                if depth < 0:
                    # closing tag of the element we were invoked for
                    break
            elif type == IgnorableWhitespace:
                if key != None:
                    buffer += self.token[1]
            elif type == Characters:
                if key != None:
                    buffer += self.token[1]

    def readContent(self, handler, result=None):
        """Generator: accumulate the character data of the current element
        until its end tag, then deliver the text via ``handler`` (either a
        callable, or a one-slot list used as an out-parameter) and
        optionally yield Return(result)."""
        buffer = ""
        while True:
            type = self.token[0]
            if type == IgnorableWhitespace:
                buffer += self.token[1]
            elif type == Characters:
                buffer += self.token[1]
            elif type == EndElement:
                break
            yield
        if isinstance(handler, types.ListType):  # Python 2 list-type check
            handler[0] = buffer
        else:
            handler(buffer)
        if result:
            yield Return(result)

    def readInts(self, buffer):
        """Generator: parse whitespace-separated integers into ``buffer``."""
        def myHandler(text):
            values = stripWhitespace(text).split(" ")
            buffer.extend(map(int, values))
        return self.readContent(myHandler)

    def readFloats(self, buffer):
        """Generator: parse whitespace-separated floats into ``buffer``."""
        def myHandler(text):
            values = stripWhitespace(text).split(" ")
            buffer.extend(map(float, values))
        return self.readContent(myHandler)
class ColladaParseHandler(ContentHandler):
    """SAX ContentHandler that feeds events to the generator stack in a
    ParseState.

    Each SAX callback becomes a token tuple; the current handler
    generator may push a sub-handler by yielding it, or finish
    (optionally yielding Return) to pop back to its caller.
    (Python 2 module: print statements, generator.next(),
    ``except Exception, e``.)
    """
    def __init__(self, state):
        self.state = state
        self.state.stack = []
        # NOTE(review): assumes the ParseState object provides a main()
        # generator as the root handler -- confirm at the call site.
        self.state.current = self.state.main()
        self._debug = False

    def _getDebugState(self):
        """Return a one-line dump of the current token and handler stack."""
        return "TOKEN: %r STACK: %s" % (self.state.token, ",".join([util.lifetime.best_repr(x) for x in self.state.stack]))

    def _push(self, handler):
        """Suspend the current handler and make ``handler`` active."""
        if self._debug:
            print "+%s" % (util.lifetime.best_repr(handler),)
        self.state.stack.append(self.state.current)
        self.state.current = handler
        if len(self.state.stack) > 512:
            # Guard against runaway recursion in the handler generators.
            raise Exception("Handler stack overflow", self._getDebugState())

    def _pop(self):
        """Discard the active handler and resume its suspended caller."""
        if self._debug:
            print "-%s" % (util.lifetime.best_repr(self.state.current),)
        if len(self.state.stack) > 0:
            self.state.current = self.state.stack.pop()
        else:
            self.state.current = None

    def _step(self):
        """Advance the active generator one step.

        Returns True when the current token has been consumed (the loop
        in token() may stop), False when a newly pushed sub-handler must
        also be stepped with the same token.
        """
        newHandler = None
        try:
            if self.state.result != None:
                # Deliver a pending Return value to the resumed caller.
                self.state.current.send(self.state.result)
                self.state.result = None
            else:
                newHandler = self.state.current.next()
        except StopIteration:
            # Generator finished without an explicit Return.
            self._pop()
        except Exception, e:
            if self._debug:
                print e, self._getDebugState()
            raise
        if newHandler != None:
            if isinstance(newHandler, Return):
                # Propagate the returned value to the caller on the stack.
                self.state.result = newHandler.value
                self._pop()
                return True
            else:
                self._push(newHandler)
                return False
        else:
            return True

    def token(self, type, *args):
        """Convert one SAX event into a token tuple and run the handler
        generators until the token is consumed."""
        self.state.output._tokensRead += 1
        t = (type,) + args
        self.state.token = t
        if (type == StartElement) and self._debug:
            print "<" + args[0] + ">"
        elif (type == EndElement) and self._debug:
            print "</" + args[0] + ">"
        while True:
            r = self._step()
            if r:
                break

    # Standard SAX ContentHandler callbacks, each forwarded as a token.
    def startDocument(self):
        self.token(StartDocument)

    def endDocument(self):
        self.token(EndDocument)

    def startElement(self, name, attributes):
        self.state.output._elementsRead += 1
        self.token(StartElement, name, attributes)

    def endElement(self, name):
        self.token(EndElement, name)

    def ignorableWhitespace(self, whitespace):
        self.token(IgnorableWhitespace, whitespace)

    def characters(self, content):
        self.token(Characters, content)
import util
import types
from xml.sax.handler import *
# Token type tags: produced by ColladaParseHandler.token() from SAX
# events and consumed by the generator-based parser state machine below.
StartDocument = 0
EndDocument = 1
StartElement = 2
EndElement = 3
IgnorableWhitespace = 4
Characters = 5
def storeInDict(target, key, value):
    """Storer callback: put ``value`` under ``key`` in the dict ``target``."""
    target.update({key: value})
def storeInExistingAttribute(target, key, value, converter=None):
    """Storer callback: set attribute ``key`` on ``target`` only if the
    attribute already exists, optionally converting ``value`` first."""
    if not hasattr(target, key):
        return
    setattr(target, key, converter(value) if converter else value)
def storeInAttribute(target, key, value):
    """Storer callback: unconditionally set attribute ``key`` on ``target``."""
    setattr(target, key, value)
def readAttributes(target, attributes):
    """Copy every SAX attribute of an element onto ``target`` as a plain
    Python attribute."""
    for name in attributes.getNames():
        setattr(target, name, attributes[name])
def stripWhitespace(text):
    """Collapse a multi-line chunk of XML text: strip each line and join
    the lines with single spaces (an empty line still contributes one
    separator, so blank lines yield a double space)."""
    stripped = []
    for raw_line in text.replace('\r', '').split('\n'):
        stripped.append(raw_line.strip())
    return ' '.join(stripped).strip()
class Return(object):
    """Sentinel yielded by a handler generator to hand ``value`` back to
    the generator that delegated to it."""

    def __init__(self, value=None):
        self.value = value
class ParseState(object):
    """Shared state for the generator-driven COLLADA parser.

    SAX events are turned into token tuples (see ColladaParseHandler);
    a stack of handler generators consumes them.  ``current`` is the
    active generator, ``stack`` holds its suspended callers, and
    ``result`` carries a value handed back (via Return) to the resuming
    caller.  (Python 2 module: note ``types.ListType`` below.)
    """
    def __init__(self, output):
        self.token = None    # most recent SAX token tuple
        self.output = output # destination document being built
        self.current = None  # active handler generator
        self.result = None   # pending Return value for the caller
        self.stack = []      # suspended handler generators

    def fillDictionary(self, target, storer = storeInDict):
        """Generator: read the child elements of the current element into
        ``target`` until the enclosing end tag.  Each child element name
        becomes a key and its character data the value; ``storer``
        decides how each pair lands on ``target``."""
        depth = 0
        key = None
        buffer = None
        while True:
            yield
            type = self.token[0]
            if type == StartElement:
                depth += 1
                key = self.token[1]
                buffer = ""
            elif type == EndElement:
                if key != None:
                    storer(target, key, buffer)
                    key = None
                    buffer = None
                depth -= 1
                if depth < 0:
                    # closing tag of the element we were invoked for
                    break
            elif type == IgnorableWhitespace:
                if key != None:
                    buffer += self.token[1]
            elif type == Characters:
                if key != None:
                    buffer += self.token[1]

    def readContent(self, handler, result=None):
        """Generator: accumulate the character data of the current element
        until its end tag, then deliver the text via ``handler`` (either a
        callable, or a one-slot list used as an out-parameter) and
        optionally yield Return(result)."""
        buffer = ""
        while True:
            type = self.token[0]
            if type == IgnorableWhitespace:
                buffer += self.token[1]
            elif type == Characters:
                buffer += self.token[1]
            elif type == EndElement:
                break
            yield
        if isinstance(handler, types.ListType):  # Python 2 list-type check
            handler[0] = buffer
        else:
            handler(buffer)
        if result:
            yield Return(result)

    def readInts(self, buffer):
        """Generator: parse whitespace-separated integers into ``buffer``."""
        def myHandler(text):
            values = stripWhitespace(text).split(" ")
            buffer.extend(map(int, values))
        return self.readContent(myHandler)

    def readFloats(self, buffer):
        """Generator: parse whitespace-separated floats into ``buffer``."""
        def myHandler(text):
            values = stripWhitespace(text).split(" ")
            buffer.extend(map(float, values))
        return self.readContent(myHandler)
class ColladaParseHandler(ContentHandler):
    """SAX ContentHandler that feeds events to the generator stack in a
    ParseState.

    Each SAX callback becomes a token tuple; the current handler
    generator may push a sub-handler by yielding it, or finish
    (optionally yielding Return) to pop back to its caller.
    (Python 2 module: print statements, generator.next(),
    ``except Exception, e``.)
    """
    def __init__(self, state):
        self.state = state
        self.state.stack = []
        # NOTE(review): assumes the ParseState object provides a main()
        # generator as the root handler -- confirm at the call site.
        self.state.current = self.state.main()
        self._debug = False

    def _getDebugState(self):
        """Return a one-line dump of the current token and handler stack."""
        return "TOKEN: %r STACK: %s" % (self.state.token, ",".join([util.lifetime.best_repr(x) for x in self.state.stack]))

    def _push(self, handler):
        """Suspend the current handler and make ``handler`` active."""
        if self._debug:
            print "+%s" % (util.lifetime.best_repr(handler),)
        self.state.stack.append(self.state.current)
        self.state.current = handler
        if len(self.state.stack) > 512:
            # Guard against runaway recursion in the handler generators.
            raise Exception("Handler stack overflow", self._getDebugState())

    def _pop(self):
        """Discard the active handler and resume its suspended caller."""
        if self._debug:
            print "-%s" % (util.lifetime.best_repr(self.state.current),)
        if len(self.state.stack) > 0:
            self.state.current = self.state.stack.pop()
        else:
            self.state.current = None

    def _step(self):
        """Advance the active generator one step.

        Returns True when the current token has been consumed (the loop
        in token() may stop), False when a newly pushed sub-handler must
        also be stepped with the same token.
        """
        newHandler = None
        try:
            if self.state.result != None:
                # Deliver a pending Return value to the resumed caller.
                self.state.current.send(self.state.result)
                self.state.result = None
            else:
                newHandler = self.state.current.next()
        except StopIteration:
            # Generator finished without an explicit Return.
            self._pop()
        except Exception, e:
            if self._debug:
                print e, self._getDebugState()
            raise
        if newHandler != None:
            if isinstance(newHandler, Return):
                # Propagate the returned value to the caller on the stack.
                self.state.result = newHandler.value
                self._pop()
                return True
            else:
                self._push(newHandler)
                return False
        else:
            return True

    def token(self, type, *args):
        """Convert one SAX event into a token tuple and run the handler
        generators until the token is consumed."""
        self.state.output._tokensRead += 1
        t = (type,) + args
        self.state.token = t
        if (type == StartElement) and self._debug:
            print "<" + args[0] + ">"
        elif (type == EndElement) and self._debug:
            print "</" + args[0] + ">"
        while True:
            r = self._step()
            if r:
                break

    # Standard SAX ContentHandler callbacks, each forwarded as a token.
    def startDocument(self):
        self.token(StartDocument)

    def endDocument(self):
        self.token(EndDocument)

    def startElement(self, name, attributes):
        self.state.output._elementsRead += 1
        self.token(StartElement, name, attributes)

    def endElement(self, name):
        self.token(EndElement, name)

    def ignorableWhitespace(self, whitespace):
        self.token(IgnorableWhitespace, whitespace)

    def characters(self, content):
        self.token(Characters, content)
#!/usr/bin/env python
#
# Copyright 2006, 2007 Google Inc. All Rights Reserved.
# Author: danderson@google.com (David Anderson)
#
# Script for uploading files to a Google Code project.
#
# This is intended to be both a useful script for people who want to
# streamline project uploads and a reference implementation for
# uploading files to Google Code projects.
#
# To upload a file to Google Code, you need to provide a path to the
# file on your local machine, a small summary of what the file is, a
# project name, and a valid account that is a member or owner of that
# project. You can optionally provide a list of labels that apply to
# the file. The file will be uploaded under the same name that it has
# in your local filesystem (that is, the "basename" or last path
# component). Run the script with '--help' to get the exact syntax
# and available options.
#
# Note that the upload script requests that you enter your
# googlecode.com password. This is NOT your Gmail account password!
# This is the password you use on googlecode.com for committing to
# Subversion and uploading files. You can find your password by going
# to http://code.google.com/hosting/settings when logged in with your
# Gmail account. If you have already committed to your project's
# Subversion repository, the script will automatically retrieve your
# credentials from there (unless disabled, see the output of '--help'
# for details).
#
# If you are looking at this script as a reference for implementing
# your own Google Code file uploader, then you should take a look at
# the upload() function, which is the meat of the uploader. You
# basically need to build a multipart/form-data POST request with the
# right fields and send it to https://PROJECT.googlecode.com/files .
# Authenticate the request using HTTP Basic authentication, as is
# shown below.
#
# Licensed under the terms of the Apache Software License 2.0:
# http://www.apache.org/licenses/LICENSE-2.0
#
# Questions, comments, feature requests and patches are most welcome.
# Please direct all of these to the Google Code users group:
# http://groups.google.com/group/google-code-hosting
"""Google Code file uploader script.
"""
__author__ = 'danderson@google.com (David Anderson)'
import httplib
import os.path
import optparse
import getpass
import base64
import sys
def upload(file, project_name, user_name, password, summary, labels=None):
    """Upload a file to a Google Code project's file server.

    Args:
      file: The local path to the file.
      project_name: The name of your project on Google Code.
      user_name: Your Google account name.
      password: The googlecode.com password for your account.
                Note that this is NOT your global Google Account password!
      summary: A small description for the file.
      labels: an optional list of label strings with which to tag the file.

    Returns: a tuple:
      http_status: 201 if the upload succeeded, something else if an
                   error occured.
      http_reason: The human-readable string associated with http_status
      file_url: If the upload succeeded, the URL of the file on Google
                Code, None otherwise.
    """
    # The login is the user part of user@gmail.com. If the login provided
    # is in the full user@domain form, strip it down.
    if user_name.endswith('@gmail.com'):
        user_name = user_name[:user_name.index('@gmail.com')]

    form_fields = [('summary', summary)]
    if labels is not None:
        form_fields.extend([('label', l.strip()) for l in labels])

    content_type, body = encode_upload_request(form_fields, file)

    # HTTP Basic auth against the googlecode.com file-upload endpoint.
    auth_token = base64.b64encode('%s:%s'% (user_name, password))
    headers = {
        'Authorization': 'Basic %s' % auth_token,
        'User-Agent': 'Googlecode.com uploader v0.9.4',
        'Content-Type': content_type,
    }

    server = httplib.HTTPSConnection('%s.googlecode.com' % project_name)
    server.request('POST', '/files', body, headers)
    resp = server.getresponse()
    server.close()

    # 201 Created carries the new file's URL in the Location header.
    location = resp.getheader('Location', None) if resp.status == 201 else None
    return resp.status, resp.reason, location
def encode_upload_request(fields, file_path):
    """Encode the given fields and file into a multipart form body.

    fields is a sequence of (name, value) pairs. file_path is the path of
    the file to upload. The file will be uploaded to Google Code with
    the same file name.

    Returns: (content_type, body) ready for httplib.HTTP instance
    """
    BOUNDARY = '----------Googlecode_boundary_reindeer_flotilla'
    CRLF = '\r\n'

    # One form-data part per metadata field.
    body = []
    for name, value in fields:
        body.append('--' + BOUNDARY)
        body.append('Content-Disposition: form-data; name="%s"' % name)
        body.append('')
        body.append(value)

    # Then the file part itself.
    f = open(file_path, 'rb')
    file_content = f.read()
    f.close()
    body.append('--' + BOUNDARY)
    body.append('Content-Disposition: form-data; name="filename"; filename="%s"'
                % os.path.basename(file_path))
    # The upload server determines the mime-type, no need to set it.
    body.append('Content-Type: application/octet-stream')
    body.append('')
    body.append(file_content)

    # Closing boundary, plus a trailing empty string for the final CRLF.
    body.append('--' + BOUNDARY + '--')
    body.append('')
    return 'multipart/form-data; boundary=%s' % BOUNDARY, CRLF.join(body)
def upload_find_auth(file_path, project_name, summary, labels=None,
                     user_name=None, password=None, tries=3):
    """Find credentials and upload a file to a Google Code project's file server.

    file_path, project_name, summary, and labels are passed as-is to upload.

    Args:
      file_path: The local path to the file.
      project_name: The name of your project on Google Code.
      summary: A small description for the file.
      labels: an optional list of label strings with which to tag the file.
      user_name: Your Google account name, or None to prompt for it.
      password: Your googlecode.com password, or None to prompt for it.
      tries: How many attempts to make.

    Returns: the (status, reason, url) tuple of the last upload attempt.
    """
    # NOTE(review): calling with tries <= 0 skips the loop and leaves
    # ``status`` undefined at the return -- confirm callers never do this.
    while tries > 0:
        if user_name is None:
            # Read username if not specified or loaded from svn config, or on
            # subsequent tries.
            sys.stdout.write('Please enter your googlecode.com username: ')
            sys.stdout.flush()
            user_name = sys.stdin.readline().rstrip()
        if password is None:
            # Read password if not loaded from svn config, or on subsequent tries.
            print 'Please enter your googlecode.com password.'
            print '** Note that this is NOT your Gmail account password! **'
            print 'It is the password you use to access Subversion repositories,'
            print 'and can be found here: http://code.google.com/hosting/settings'
            password = getpass.getpass()
        status, reason, url = upload(file_path, project_name, user_name, password,
                                     summary, labels)
        # Returns 403 Forbidden instead of 401 Unauthorized for bad
        # credentials as of 2007-07-17.
        print "status: ",status,reason
        if status in [httplib.FORBIDDEN, httplib.UNAUTHORIZED]:
            # Rest for another try: force a fresh prompt for both values.
            user_name = password = None
            tries = tries - 1
        else:
            # We're done.
            break
    return status, reason, url
def main():
  """Parse command-line flags and upload the single named file to Google Code.

  Returns 0 on success, 1 on upload failure (parser.error() exits directly
  for bad arguments).
  """
  parser = optparse.OptionParser(usage='googlecode-upload.py -s SUMMARY '
                                 '-p PROJECT [options] FILE')
  parser.add_option('-s', '--summary', dest='summary',
                    help='Short description of the file')
  parser.add_option('-p', '--project', dest='project',
                    help='Google Code project name')
  parser.add_option('-u', '--user', dest='user',
                    help='Your Google Code username')
  parser.add_option('-w', '--password', dest='password',
                    help='Your Google Code password')
  parser.add_option('-l', '--labels', dest='labels',
                    help='An optional list of comma-separated labels to attach '
                    'to the file')

  options, args = parser.parse_args()

  # parser.error() exits the process, so no explicit return is needed after
  # each validation failure.
  if not options.summary:
    parser.error('File summary is missing.')
  elif not options.project:
    parser.error('Project name is missing.')
  elif len(args) < 1:
    parser.error('File to upload not provided.')
  elif len(args) > 1:
    parser.error('Only one file may be specified.')

  file_path = args[0]

  if options.labels:
    labels = options.labels.split(',')
  else:
    labels = None

  status, reason, url = upload_find_auth(file_path, options.project,
                                         options.summary, labels,
                                         options.user, options.password)
  if url:
    print 'The file was uploaded successfully.'
    print 'URL: %s' % url
    return 0
  else:
    print 'An error occurred. Your file was not uploaded.'
    print 'Google Code upload server said: %s (%s)' % (reason, status)
    return 1


if __name__ == '__main__':
  sys.exit(main())
| Python |
# Mirror the current working tree into a 'snapshot' subdirectory, creating
# or updating files as needed (verbose=True makes distutils log each copy).
from distutils.dir_util import copy_tree

copy_tree('.', 'snapshot', verbose=True)
| Python |
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from apiclient.discovery import build
from apiclient.oauth import FlowThreeLegged
import datetime
import httplib2
import logging
import oauth_wrap
import optparse
import os
import sys
def load_properties_file(path):
    """Parse a Java-style .properties file into a dict.

    Blank lines and lines starting with '#' are ignored.  Each remaining
    line must look like 'key=value'; only the FIRST '=' splits the pair,
    so values may themselves contain '=' characters (e.g. base64 secrets).

    Args:
        path: Filesystem path of the properties file.

    Returns:
        Dict mapping stripped keys to stripped values.
    """
    properties = {}
    # 'with' guarantees the handle is closed (the original leaked it).
    with open(path) as f:
        for line in f:
            line = line.strip()
            # Skip comments AND blank lines -- a blank line would otherwise
            # raise ValueError from the unpacking below.
            if not line or line.startswith('#'):
                continue
            # maxsplit=1: values containing '=' must not be split further.
            key, value = line.split('=', 1)
            properties[key.strip()] = value.strip()
    return properties
def save_properties(consumer_key, consumer_secret, token_key, token_secret, path):
    """Write OAuth credentials to *path* in oacurl.java's properties format.

    Args:
        consumer_key: OAuth consumer key.
        consumer_secret: OAuth consumer secret.
        token_key: OAuth access-token key.
        token_secret: OAuth access-token secret.
        path: Destination file path (overwritten if it already exists).
    """
    # File format and order is based on oacurl.java's defaults
    now = datetime.datetime.today()
    # BUG FIX: the time field must use %M (minutes); %m is the month and
    # produced timestamps like '14:05:33' -> '14:05(month!):33'.
    now_string = now.strftime('%a %b %d %H:%M:%S %Z %Y')
    # 'with' closes the file even on write errors; also avoid shadowing the
    # 'file' builtin.
    with open(path, 'w') as f:
        f.write('#%s\n' % now_string)
        f.write('consumerSecret=%s\n' % consumer_secret)
        f.write('accessToken=%s\n' % token_key)
        f.write('consumerKey=%s\n' % consumer_key)
        f.write('accessTokenSecret=%s\n' % token_secret)
def fetch(url):
    """Fetch *url* signed with the OAuth credentials in ~/.oacurl.properties.

    Exits the process with status 1 when the properties file is missing
    (i.e. the user has not run 'login' yet).

    Returns:
        (response, content) tuple as returned by httplib2.Http.request.
    """
    logging.debug('Now fetching: %s' % url)
    path = os.path.expanduser('~/.oacurl.properties')
    if not os.path.exists(path):
        logging.debug('User is not logged in.')
        print 'You are not logged in'
        sys.exit(1)
    properties = load_properties_file(path)
    # Property names follow oacurl.java's file-format conventions.
    oauth_parameters = {
        'consumer_key': properties['consumerKey'],
        'consumer_secret': properties['consumerSecret'],
        'oauth_token': properties['accessToken'],
        'oauth_token_secret': properties['accessTokenSecret']}
    http = oauth_wrap.get_authorised_http(oauth_parameters)
    response, content = http.request(url)
    logging.debug(response)
    logging.debug(content)
    return response, content
def buzz_login():
    """Run the three-legged OAuth dance for the Buzz API.

    Prints an authorization URL, waits for the user to grant access and
    paste the verification code back, then stores the resulting access
    token in ~/.oacurl.properties.
    """
    buzz_discovery = build("buzz", "v1").auth_discovery()
    # 'anonymous' consumer key/secret are Google's standard values for
    # unregistered applications.
    flow = FlowThreeLegged(buzz_discovery,
                           consumer_key='anonymous',
                           consumer_secret='anonymous',
                           user_agent='google-api-client-python-buzz-cmdline/1.0',
                           domain='anonymous',
                           scope='https://www.googleapis.com/auth/buzz',
                           xoauth_displayname='oacurl.py')
    authorize_url = flow.step1_get_authorize_url()
    print 'Go to the following link in your browser:'
    print authorize_url
    print
    accepted = 'n'
    while accepted.lower() == 'n':
        accepted = raw_input('Have you authorized me? (y/n) ')
    verification = raw_input('What is the verification code? ').strip()
    credentials = flow.step2_exchange(verification)
    path = os.path.expanduser('~/.oacurl.properties')
    save_properties('anonymous', 'anonymous', credentials.token.key,
                    credentials.token.secret, path)
def generic_login():
    """Placeholder login flow for non-Buzz services (not implemented)."""
    #TODO(ade) Implement support for other services
    print 'Support for services other than Buzz is not implemented yet. Sorry.'
def login(options):
    """Dispatch to the login flow selected on the command line."""
    handler = buzz_login if options.buzz else generic_login
    handler()
def get_command(args):
    """Return the recognized sub-command ('login' or 'fetch') or None."""
    if args and args[0] in ('login', 'fetch'):
        return args[0]
    return None
def configure_logging(options):
    """Enable DEBUG-level logging when --verbose was given."""
    if options.verbose:
        logging.basicConfig(level=logging.DEBUG)
def main():
    """Parse the command line and run the 'login' or 'fetch' sub-command."""
    usage = '''Usage: python %prog [options] fetch <url>
Example: python %prog -v fetch "https://www.googleapis.com/buzz/v1/people/@me/@self?alt=json&pp=1"
'''
    parser = optparse.OptionParser(usage=usage)
    parser.set_defaults(verbose=False)
    parser.add_option('-v', '--verbose', action='store_true', dest='verbose')
    parser.add_option('-q', '--quiet', action='store_false', dest='verbose')
    parser.add_option('--buzz', action='store_true', dest='buzz')
    (options, args) = parser.parse_args()
    configure_logging(options)
    logging.debug('Options: %s and Args: %s' % (str(options), str(args)))
    command = get_command(args)
    if not command:
        # parser.error() exits the process; the return is defensive only.
        parser.error('Invalid arguments')
        return
    if command == 'fetch':
        response, content = fetch(args[1])
        print response
        print content
        return
    if command == 'login':
        login(options)


if __name__ == '__main__':
    main()
#!/usr/bin/env python
import glob
import logging
import os
import sys
import unittest
from trace import fullmodname
# Relative path to the App Engine SDK; added to sys.path below so that
# dev_appserver can be imported from main().
APP_ENGINE_PATH = '../google_appengine'

# Conditional import of cleanup function
try:
    from tests.utils import cleanup
except:
    # No project cleanup helper available; substitute a no-op so run()
    # can call cleanup() unconditionally.
    def cleanup():
        pass

# Ensure current working directory is in path
sys.path.insert(0, os.getcwd())
sys.path.insert(0, APP_ENGINE_PATH)
def build_suite(folder, verbosity):
    """Discover test_*.py modules under *folder* and load them into a suite.

    Args:
        folder: Directory to scan (its top level and one level of subfolders).
        verbosity: When > 0, print the module list before loading.

    Returns:
        A unittest suite of every discovered test module.

    Raises:
        Re-raises any loader error after importing each module individually
        to surface a more specific traceback.
    """
    # find all of the test modules
    top_level_modules = map(fullmodname, glob.glob(os.path.join(folder, 'test_*.py')))
    # TODO(ade) Verify that this works on Windows. If it doesn't then switch to os.walk instead
    lower_level_modules = map(fullmodname, glob.glob(os.path.join(folder, '*/test_*.py')))
    modules = top_level_modules + lower_level_modules

    if verbosity > 0:
        print "Running the tests found in the following modules:"
        print modules

    # load all of the tests into a suite
    try:
        return unittest.TestLoader().loadTestsFromNames(modules)
    except Exception, exception:
        # attempt to produce a more specific message
        for module in modules:
            __import__(module)
        raise
def run(test_folder_name, verbosity, exit_on_failure):
    """Build and run the tests in test_folder_name.

    Exits the process with status 1 when *exit_on_failure* is set and any
    test failed; otherwise calls cleanup() after the run.
    """
    tests = build_suite(test_folder_name, verbosity)
    result = unittest.TextTestRunner(verbosity=verbosity).run(tests)
    if exit_on_failure and not result.wasSuccessful():
        sys.exit(1)
    cleanup()
def main():
    """Parse command-line flags and run the selected test folder(s)."""
    if '--help' in sys.argv:
        print 'Usage: python runtests.py [-q|--quiet|-v|--verbose] [--exit_on_failure] [tests|functional_tests|contrib_tests]'
        return
    verbosity = 1
    exit_on_failure = '--exit_on_failure' in sys.argv
    if '-q' in sys.argv or '--quiet' in sys.argv:
        verbosity = 0
    if "-v" in sys.argv or '--verbose' in sys.argv:
        verbosity = 2
    # Map test verbosity onto logging noise levels.
    if verbosity == 0:
        logging.disable(logging.CRITICAL)
    elif verbosity == 1:
        logging.disable(logging.ERROR)
    elif verbosity == 2:
        logging.basicConfig(level=logging.DEBUG)
    # Imported late: requires APP_ENGINE_PATH to already be on sys.path.
    import dev_appserver
    dev_appserver.fix_sys_path()
    # Allow user to run a specific folder of tests
    if 'tests' in sys.argv:
        run('tests', verbosity, exit_on_failure)
    elif 'functional_tests' in sys.argv:
        run('functional_tests', verbosity, exit_on_failure)
    elif 'contrib_tests' in sys.argv:
        run('contrib_tests', verbosity, exit_on_failure)
    else:
        # No folder named: run everything.
        run('tests', verbosity, exit_on_failure)
        run('functional_tests', verbosity, exit_on_failure)
        run('contrib_tests', verbosity, exit_on_failure)


if __name__ == '__main__':
    main()
| Python |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import os
import pydoc
import re
import sys
import httplib2
from apiclient.anyjson import simplejson
from apiclient.discovery import build
BASE = 'docs/dyn'
def document(resource, path):
    """Write pydoc HTML for *resource* to BASE/<path>html, recursing into
    nested collections.

    Args:
        resource: A discovery-built API resource object.
        path: Dotted output prefix, e.g. 'buzz.v1.'.  Note the trailing
            dot: the output file name is path + 'html' ('buzz.v1.html').
    """
    print path
    collections = []
    # A collection is a public (no '_') callable attribute that is flagged
    # with __is_resource__ by the discovery builder.
    for name in dir(resource):
        if not "_" in name and callable(getattr(resource, name)) and hasattr(
            getattr(resource, name), '__is_resource__'):
            collections.append(name)
    obj, name = pydoc.resolve(type(resource))
    page = pydoc.html.page(
        pydoc.describe(obj), pydoc.html.document(obj, name))
    # Hyperlink each collection name in the page to its own generated file.
    for name in collections:
        page = re.sub('strong>(%s)<' % name, r'strong><a href="%s">\1</a><' % (path + name + ".html"), page)
    # Recurse into each nested collection.
    for name in collections:
        document(getattr(resource, name)(), path + name + ".")
    f = open(os.path.join(BASE, path + 'html'), 'w')
    f.write(page)
    f.close()
def document_api(name, version):
    """Generate reference docs for one API, given its discovery name/version."""
    service = build(name, version)
    document(service, '%s.%s.' % (name, version))
if __name__ == '__main__':
    # Enumerate every preferred API in the public discovery directory and
    # generate documentation for each one.
    http = httplib2.Http()
    resp, content = http.request('https://www.googleapis.com/discovery/v0.3/directory?preferred=true')
    if resp.status == 200:
        directory = simplejson.loads(content)['items']
        for api in directory:
            document_api(api['name'], api['version'])
    else:
        sys.exit("Failed to load the discovery document.")
| Python |
# Django settings for django_sample project.
import os

DEBUG = True
TEMPLATE_DEBUG = DEBUG

ADMINS = (
    # ('Your Name', 'your_email@domain.com'),
)

MANAGERS = ADMINS

DATABASE_ENGINE = 'sqlite3'
DATABASE_NAME = 'database.sqlite3'
DATABASE_USER = ''
DATABASE_PASSWORD = ''
DATABASE_HOST = ''
DATABASE_PORT = ''

# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/New_York'

# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'

SITE_ID = 1

# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True

# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = ''

# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = ''

# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/media/'

# Make this unique, and don't share it with anybody.
SECRET_KEY = '_=9hq-$t_uv1ckf&s!y2$9g$1dm*6p1cl%*!^mg=7gr)!zj32d'

# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.load_template_source',
    'django.template.loaders.app_directories.load_template_source',
    # 'django.template.loaders.eggs.load_template_source',
)

MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
)

ROOT_URLCONF = 'django_sample.urls'

TEMPLATE_DIRS = (
    # Put strings here, like "/home/html/django_templates"
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
    # BUG FIX: the trailing comma below is required -- without it this
    # parenthesized expression is a plain string (not a 1-tuple) and Django
    # would iterate it character by character when searching for templates.
    os.path.join(os.path.dirname(__file__), 'templates'),
)

INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django_sample.buzz',
)
| Python |
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import httplib2
import oauth2 as oauth
import simplejson
def oauth_wrap(consumer, token, http):
    """Monkey-patch *http* so every request carries an OAuth Authorization header.

    Args:
        consumer: An oauth2.Consumer holding the consumer key/secret.
        token: An oauth2.Token (access token) used to sign each request.
        http: An instance of httplib2.Http or something that acts like it.

    Returns:
        The same *http* instance that was passed in, modified in place.

    Example:
        h = httplib2.Http()
        h = oauth_wrap(consumer, token, h)

    Grumble. You can't create a new OAuth subclass of
    httplib2.Authentication because it never gets passed the absolute URI,
    which is needed for signing. So instead we have to overload 'request'
    with a closure that adds in the Authorization header and then calls the
    original version of 'request()'.
    """
    request_orig = http.request
    signer = oauth.SignatureMethod_HMAC_SHA1()

    def new_request(uri, method='GET', body=None, headers=None,
                    redirections=httplib2.DEFAULT_MAX_REDIRECTS,
                    connection_type=None):
        """Modify the request headers to add the appropriate
        Authorization header."""
        req = oauth.Request.from_consumer_and_token(
            consumer, token, http_method=method, http_url=uri)
        req.sign_request(signer, consumer, token)
        # 'is None' rather than '== None': identity is the correct test for
        # the missing-headers sentinel.
        if headers is None:
            headers = {}
        headers.update(req.to_header())
        headers['user-agent'] = 'jcgregorio-test-client'
        return request_orig(uri, method, body, headers, redirections,
                            connection_type)

    http.request = new_request
    return http
def get_authorised_http(oauth_params):
    """Return an httplib2.Http whose requests are OAuth-signed with the
    credentials found in *oauth_params*."""
    # Create a simple monkeypatch for httplib2.Http.request that just adds
    # in the oauth authorization header and then calls the original
    # request().
    consumer = oauth.Consumer(oauth_params['consumer_key'],
                              oauth_params['consumer_secret'])
    token = oauth.Token(oauth_params['oauth_token'],
                        oauth_params['oauth_token_secret'])
    return oauth_wrap(consumer, token, httplib2.Http())
def get_wrapped_http(filename='oauth_token.dat'):
    """Load JSON OAuth credentials from *filename* and return a signed Http.

    Args:
        filename: Path of a JSON file holding consumer_key, consumer_secret,
            oauth_token and oauth_token_secret.
    """
    # 'with' guarantees the credentials file is closed even when the JSON
    # parse fails (the original leaked the file handle).
    with open(filename, 'r') as f:
        oauth_params = simplejson.loads(f.read())
    return get_authorised_http(oauth_params)
| Python |
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Setup script for Google API Python client.
Also installs included versions of third party libraries, if those libraries
are not already installed.
"""
import setup_utils
has_setuptools = False
try:
    from setuptools import setup
    has_setuptools = True
except ImportError:
    # Fall back to plain distutils when setuptools is unavailable.
    from distutils.core import setup

packages = [
    'apiclient',
    'oauth2client',
    'apiclient.ext',
    'apiclient.contrib',
    'apiclient.contrib.buzz',
    'apiclient.contrib.latitude',
    'apiclient.contrib.moderator',
    'uritemplate',
]

install_requires = []
py_modules = []

# (module to test for, install_requires to add if missing, packages to add if missing, py_modules to add if missing)
REQUIREMENTS = [
    ('httplib2', 'httplib2', 'httplib2', None),
    ('oauth2', 'oauth2', 'oauth2', None),
    ('gflags', 'python-gflags', None, ['gflags', 'gflags_validators']),
    (['json', 'simplejson', 'django.utils'], 'simplejson', 'simplejson', None)
]

for import_name, requires, package, modules in REQUIREMENTS:
    if setup_utils.is_missing(import_name):
        if has_setuptools:
            # setuptools can fetch the dependency at install time.
            install_requires.append(requires)
        else:
            # distutils cannot fetch dependencies; ship the bundled copy
            # either as a package or as plain modules.
            if package is not None:
                packages.append(package)
            else:
                py_modules.extend(modules)

long_desc = """The Google API Client for Python is a client library for
accessing the Buzz, Moderator, and Latitude APIs."""

setup(name="google-api-python-client",
      version="1.0beta2",
      description="Google API Client Library for Python",
      long_description=long_desc,
      author="Joe Gregorio",
      author_email="jcgregorio@google.com",
      url="http://code.google.com/p/google-api-python-client/",
      install_requires=install_requires,
      packages=packages,
      py_modules=py_modules,
      package_data={
          'apiclient': ['contrib/*/*.json']
      },
      scripts=['bin/enable-app-engine-project'],
      license="Apache 2.0",
      keywords="google api client",
      classifiers=['Development Status :: 3 - Alpha',
                   'Intended Audience :: Developers',
                   'License :: OSI Approved :: Apache Software License',
                   'Operating System :: POSIX',
                   'Topic :: Internet :: WWW/HTTP'])
| Python |
"""
The MIT License
Copyright (c) 2007-2010 Leah Culver, Joe Stump, Mark Paschal, Vic Fryzel
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import urllib
import time
import random
import urlparse
import hmac
import binascii
import httplib2
try:
from urlparse import parse_qs, parse_qsl
except ImportError:
from cgi import parse_qs, parse_qsl
VERSION = '1.0' # Hi Blaine!
HTTP_METHOD = 'GET'
SIGNATURE_METHOD = 'PLAINTEXT'
class Error(RuntimeError):
    """Base exception for OAuth failures."""

    def __init__(self, message='OAuth error occurred.'):
        self._message = message

    @property
    def message(self):
        """A hack to get around the deprecation errors in 2.6."""
        return self._message

    def __str__(self):
        # Route through the property so subclasses overriding .message
        # stay consistent with str().
        return self.message
class MissingSignature(Error):
    """Raised when an OAuth request lacks its oauth_signature parameter."""
    pass
def build_authenticate_header(realm=''):
    """Build the optional WWW-Authenticate header for a 401 response."""
    challenge = 'OAuth realm="%s"' % realm
    return {'WWW-Authenticate': challenge}
def build_xoauth_string(url, consumer, token=None):
    """Build an XOAUTH string for use in SMTP/IMAP authentication.

    Signs a GET request for *url* with HMAC-SHA1 and renders the
    'GET <url> <param>="...",...' string expected by the XOAUTH scheme.
    """
    request = Request.from_consumer_and_token(consumer, token,
                                              "GET", url)

    signing_method = SignatureMethod_HMAC_SHA1()
    request.sign_request(signing_method, consumer, token)

    params = []
    for k, v in sorted(request.iteritems()):
        if v is not None:
            params.append('%s="%s"' % (k, escape(v)))

    return "%s %s %s" % ("GET", url, ','.join(params))
def escape(s):
    """Escape a URL including any /."""
    # safe='~' means ONLY '~' is exempt beyond alphanumerics -- notably
    # '/' IS percent-encoded, as OAuth parameter encoding requires.
    return urllib.quote(s, safe='~')
def generate_timestamp():
    """Get seconds since epoch (UTC)."""
    now = time.time()
    return int(now)
def generate_nonce(length=8):
    """Generate a pseudorandom string of *length* decimal digits."""
    digits = [str(random.randint(0, 9)) for _ in range(length)]
    return ''.join(digits)
def generate_verifier(length=8):
    """Generate a pseudorandom string of *length* decimal digits."""
    return ''.join(str(random.randint(0, 9)) for _ in range(length))
class Consumer(object):
    """A consumer of OAuth-protected services.

    The OAuth consumer is the "third-party" application (the OAuth client)
    that wants to access protected resources from a service provider on
    behalf of an end user.  The service provider issues it a *key*, sent
    with every request to identify the consumer, and a *secret*, used only
    when signing requests to prove the request's origin.  With those
    credentials the consumer can ask the provider for a request token and
    start the OAuth authorization dance.
    """

    key = None
    secret = None

    def __init__(self, key, secret):
        self.key = key
        self.secret = secret

        # Validate after assignment, mirroring the stored values.
        if self.key is None or self.secret is None:
            raise ValueError("Key and secret must be set.")

    def __str__(self):
        return urllib.urlencode({'oauth_consumer_key': self.key,
                                 'oauth_consumer_secret': self.secret})
class Token(object):
    """An OAuth credential used to request authorization or a protected
    resource.

    Tokens in OAuth comprise a *key* and a *secret*. The key is included in
    requests to identify the token being used, but the secret is used only in
    the signature, to prove that the requester is who the server gave the
    token to.

    When first negotiating the authorization, the consumer asks for a *request
    token* that the live user authorizes with the service provider. The
    consumer then exchanges the request token for an *access token* that can
    be used to access protected resources.
    """

    key = None
    secret = None
    callback = None
    callback_confirmed = None
    verifier = None

    def __init__(self, key, secret):
        self.key = key
        self.secret = secret

        if self.key is None or self.secret is None:
            raise ValueError("Key and secret must be set.")

    def set_callback(self, callback):
        """Record the consumer's callback URL (OAuth 1.0a)."""
        self.callback = callback
        self.callback_confirmed = 'true'

    def set_verifier(self, verifier=None):
        """Set the oauth_verifier, generating a random one when not given."""
        if verifier is not None:
            self.verifier = verifier
        else:
            self.verifier = generate_verifier()

    def get_callback_url(self):
        """Return the callback URL, with oauth_verifier appended when both
        a callback and a verifier are set."""
        if self.callback and self.verifier:
            # Append the oauth_verifier.
            parts = urlparse.urlparse(self.callback)
            scheme, netloc, path, params, query, fragment = parts[:6]
            if query:
                query = '%s&oauth_verifier=%s' % (query, self.verifier)
            else:
                query = 'oauth_verifier=%s' % self.verifier
            return urlparse.urlunparse((scheme, netloc, path, params,
                                        query, fragment))
        return self.callback

    def to_string(self):
        """Returns this token as a plain string, suitable for storage.

        The resulting string includes the token's secret, so you should never
        send or store this string where a third party can read it.
        """
        data = {
            'oauth_token': self.key,
            'oauth_token_secret': self.secret,
        }

        if self.callback_confirmed is not None:
            data['oauth_callback_confirmed'] = self.callback_confirmed
        return urllib.urlencode(data)

    @staticmethod
    def from_string(s):
        """Deserializes a token from a string like one returned by
        `to_string()`."""

        if not len(s):
            raise ValueError("Invalid parameter string.")

        params = parse_qs(s, keep_blank_values=False)
        if not len(params):
            raise ValueError("Invalid parameter string.")

        try:
            key = params['oauth_token'][0]
        except Exception:
            raise ValueError("'oauth_token' not found in OAuth request.")

        try:
            secret = params['oauth_token_secret'][0]
        except Exception:
            raise ValueError("'oauth_token_secret' not found in "
                             "OAuth request.")

        token = Token(key, secret)
        try:
            token.callback_confirmed = params['oauth_callback_confirmed'][0]
        except KeyError:
            pass  # 1.0, no callback confirmed.
        return token

    def __str__(self):
        return self.to_string()
def setter(attr):
    """Property factory: use the decorated function as a property's setter.

    The generated getter and deleter operate on the instance __dict__ under
    the decorated function's name; reading an unset value raises
    AttributeError (not KeyError), matching normal attribute semantics.
    """
    name = attr.__name__

    def getter(self):
        if name not in self.__dict__:
            raise AttributeError(name)
        return self.__dict__[name]

    def deleter(self):
        del self.__dict__[name]

    return property(getter, attr, deleter)
class Request(dict):

    """The parameters and information for an HTTP request, suitable for
    authorizing with OAuth credentials.

    When a consumer wants to access a service's protected resources, it does
    so using a signed HTTP request identifying itself (the consumer) with its
    key, and providing an access token authorized by the end user to access
    those resources.

    Being a dict subclass, the request's OAuth and non-OAuth parameters are
    its items.
    """

    version = VERSION

    def __init__(self, method=HTTP_METHOD, url=None, parameters=None):
        self.method = method
        self.url = url
        if parameters is not None:
            self.update(parameters)

    @setter
    def url(self, value):
        # Setting .url also derives .normalized_url (scheme://host/path with
        # default ports stripped), which is the form used when signing.
        self.__dict__['url'] = value
        if value is not None:
            scheme, netloc, path, params, query, fragment = urlparse.urlparse(value)

            # Exclude default port numbers.
            if scheme == 'http' and netloc[-3:] == ':80':
                netloc = netloc[:-3]
            elif scheme == 'https' and netloc[-4:] == ':443':
                netloc = netloc[:-4]
            if scheme not in ('http', 'https'):
                raise ValueError("Unsupported URL %s (%s)." % (value, scheme))

            # Normalized URL excludes params, query, and fragment.
            self.normalized_url = urlparse.urlunparse((scheme, netloc, path, None, None, None))
        else:
            self.normalized_url = None
            self.__dict__['url'] = None

    @setter
    def method(self, value):
        # HTTP methods are stored upper-case for comparison and signing.
        self.__dict__['method'] = value.upper()

    def _get_timestamp_nonce(self):
        """Return the (oauth_timestamp, oauth_nonce) pair."""
        return self['oauth_timestamp'], self['oauth_nonce']

    def get_nonoauth_parameters(self):
        """Get any non-OAuth parameters."""
        return dict([(k, v) for k, v in self.iteritems()
                     if not k.startswith('oauth_')])

    def to_header(self, realm=''):
        """Serialize as a header for an HTTPAuth request."""
        # Only oauth_* parameters belong in the Authorization header.
        oauth_params = ((k, v) for k, v in self.items()
                        if k.startswith('oauth_'))
        stringy_params = ((k, escape(str(v))) for k, v in oauth_params)
        header_params = ('%s="%s"' % (k, v) for k, v in stringy_params)
        params_header = ', '.join(header_params)

        auth_header = 'OAuth realm="%s"' % realm
        if params_header:
            auth_header = "%s, %s" % (auth_header, params_header)

        return {'Authorization': auth_header}

    def to_postdata(self):
        """Serialize as post data for a POST request."""
        # tell urlencode to deal with sequence values and map them correctly
        # to resulting querystring. for example self["k"] = ["v1", "v2"] will
        # result in 'k=v1&k=v2' and not k=%5B%27v1%27%2C+%27v2%27%5D
        return urllib.urlencode(self, True)

    def to_url(self):
        """Serialize as a URL for a GET request."""
        # Merge this request's parameters into the URL's existing query.
        base_url = urlparse.urlparse(self.url)
        query = parse_qs(base_url.query)
        for k, v in self.items():
            query.setdefault(k, []).append(v)
        url = (base_url.scheme, base_url.netloc, base_url.path, base_url.params,
               urllib.urlencode(query, True), base_url.fragment)
        return urlparse.urlunparse(url)

    def get_parameter(self, parameter):
        """Return *parameter*'s value, raising Error when it is absent."""
        ret = self.get(parameter)
        if ret is None:
            raise Error('Parameter not found: %s' % parameter)
        return ret

    def get_normalized_parameters(self):
        """Return a string that contains the parameters that must be signed."""
        items = []
        for key, value in self.iteritems():
            if key == 'oauth_signature':
                continue
            # 1.0a/9.1.1 states that kvp must be sorted by key, then by value,
            # so we unpack sequence values into multiple items for sorting.
            if hasattr(value, '__iter__'):
                items.extend((key, item) for item in value)
            else:
                items.append((key, value))

        # Include any query string parameters from the provided URL
        query = urlparse.urlparse(self.url)[4]
        items.extend(self._split_url_string(query).items())

        encoded_str = urllib.urlencode(sorted(items))
        # Encode signature parameters per Oauth Core 1.0 protocol
        # spec draft 7, section 3.6
        # (http://tools.ietf.org/html/draft-hammer-oauth-07#section-3.6)
        # Spaces must be encoded with "%20" instead of "+"
        return encoded_str.replace('+', '%20')

    def sign_request(self, signature_method, consumer, token):
        """Set the signature parameter to the result of sign."""
        if 'oauth_consumer_key' not in self:
            self['oauth_consumer_key'] = consumer.key

        if token and 'oauth_token' not in self:
            self['oauth_token'] = token.key

        self['oauth_signature_method'] = signature_method.name
        self['oauth_signature'] = signature_method.sign(self, consumer, token)

    @classmethod
    def make_timestamp(cls):
        """Get seconds since epoch (UTC)."""
        return str(int(time.time()))

    @classmethod
    def make_nonce(cls):
        """Generate pseudorandom number."""
        return str(random.randint(0, 100000000))

    @classmethod
    def from_request(cls, http_method, http_url, headers=None, parameters=None,
                     query_string=None):
        """Combines multiple parameter sources."""
        if parameters is None:
            parameters = {}

        # Headers
        if headers and 'Authorization' in headers:
            auth_header = headers['Authorization']
            # Check that the authorization header is OAuth.
            if auth_header[:6] == 'OAuth ':
                auth_header = auth_header[6:]
                try:
                    # Get the parameters from the header.
                    header_params = cls._split_header(auth_header)
                    parameters.update(header_params)
                except:
                    raise Error('Unable to parse OAuth parameters from '
                                'Authorization header.')

        # GET or POST query string.
        if query_string:
            query_params = cls._split_url_string(query_string)
            parameters.update(query_params)

        # URL parameters.
        param_str = urlparse.urlparse(http_url)[4]  # query
        url_params = cls._split_url_string(param_str)
        parameters.update(url_params)

        if parameters:
            return cls(http_method, http_url, parameters)

        return None

    @classmethod
    def from_consumer_and_token(cls, consumer, token=None,
                                http_method=HTTP_METHOD, http_url=None, parameters=None):
        """Build a Request pre-populated with default OAuth parameters for
        *consumer* (and optionally *token*)."""
        if not parameters:
            parameters = {}

        # Caller-supplied parameters override the generated defaults.
        defaults = {
            'oauth_consumer_key': consumer.key,
            'oauth_timestamp': cls.make_timestamp(),
            'oauth_nonce': cls.make_nonce(),
            'oauth_version': cls.version,
        }

        defaults.update(parameters)
        parameters = defaults

        if token:
            parameters['oauth_token'] = token.key
            if token.verifier:
                parameters['oauth_verifier'] = token.verifier

        return Request(http_method, http_url, parameters)

    @classmethod
    def from_token_and_callback(cls, token, callback=None,
                                http_method=HTTP_METHOD, http_url=None, parameters=None):
        """Build a Request for the authorize step from a request token."""
        if not parameters:
            parameters = {}

        parameters['oauth_token'] = token.key

        if callback:
            parameters['oauth_callback'] = callback

        return cls(http_method, http_url, parameters)

    @staticmethod
    def _split_header(header):
        """Turn Authorization: header into parameters."""
        params = {}
        parts = header.split(',')
        for param in parts:
            # Ignore realm parameter.
            if param.find('realm') > -1:
                continue
            # Remove whitespace.
            param = param.strip()
            # Split key-value.
            param_parts = param.split('=', 1)
            # Remove quotes and unescape the value.
            params[param_parts[0]] = urllib.unquote(param_parts[1].strip('\"'))
        return params

    @staticmethod
    def _split_url_string(param_str):
        """Turn URL string into parameters."""
        parameters = parse_qs(param_str, keep_blank_values=False)
        # parse_qs yields lists; keep only the first value for each key.
        for k, v in parameters.iteritems():
            parameters[k] = urllib.unquote(v[0])
        return parameters
class Client(httplib2.Http):
    """OAuthClient is a worker to attempt to execute a request."""

    def __init__(self, consumer, token=None, cache=None, timeout=None,
                 proxy_info=None):

        if consumer is not None and not isinstance(consumer, Consumer):
            raise ValueError("Invalid consumer.")

        if token is not None and not isinstance(token, Token):
            raise ValueError("Invalid token.")

        self.consumer = consumer
        self.token = token
        # HMAC-SHA1 is the default signature method; see
        # set_signature_method() to override.
        self.method = SignatureMethod_HMAC_SHA1()
        httplib2.Http.__init__(self, cache=cache, timeout=timeout,
                               proxy_info=proxy_info)

    def set_signature_method(self, method):
        """Replace the default HMAC-SHA1 signature method."""
        if not isinstance(method, SignatureMethod):
            raise ValueError("Invalid signature method.")

        self.method = method

    def request(self, uri, method="GET", body=None, headers=None,
                redirections=httplib2.DEFAULT_MAX_REDIRECTS, connection_type=None):
        """Sign the outgoing request, then delegate to httplib2.Http.request.

        Form-encoded POST bodies are folded into the signature and
        re-serialized; GET parameters are folded into the URL; all other
        cases carry the OAuth values in the Authorization header.
        """
        DEFAULT_CONTENT_TYPE = 'application/x-www-form-urlencoded'

        if not isinstance(headers, dict):
            headers = {}

        # Multipart POST bodies cannot be form-decoded, so their contents
        # are not included in the signature.
        is_multipart = method == 'POST' and headers.get('Content-Type',
            DEFAULT_CONTENT_TYPE) != DEFAULT_CONTENT_TYPE

        if body and method == "POST" and not is_multipart:
            parameters = dict(parse_qsl(body))
        else:
            parameters = None

        req = Request.from_consumer_and_token(self.consumer,
            token=self.token, http_method=method, http_url=uri,
            parameters=parameters)

        req.sign_request(self.method, self.consumer, self.token)

        if method == "POST":
            headers['Content-Type'] = headers.get('Content-Type',
                DEFAULT_CONTENT_TYPE)
            if is_multipart:
                headers.update(req.to_header())
            else:
                body = req.to_postdata()
        elif method == "GET":
            uri = req.to_url()
        else:
            headers.update(req.to_header())

        return httplib2.Http.request(self, uri, method=method, body=body,
            headers=headers, redirections=redirections,
            connection_type=connection_type)
class Server(object):
    """A skeletal implementation of a service provider, providing protected
    resources to requests from authorized consumers.

    This class implements the logic to check requests for authorization. You
    can use it with your web server or web framework to protect certain
    resources with OAuth.
    """

    timestamp_threshold = 300 # In seconds, five minutes.
    version = VERSION
    signature_methods = None

    def __init__(self, signature_methods=None):
        self.signature_methods = signature_methods or {}

    def add_signature_method(self, signature_method):
        """Register a SignatureMethod under its name; returns the registry."""
        self.signature_methods[signature_method.name] = signature_method
        return self.signature_methods

    def verify_request(self, request, consumer, token):
        """Verifies an api call and checks all the parameters.

        Raises Error on a version mismatch or invalid signature; returns the
        request's non-OAuth parameters on success.
        """
        # _get_version raises on mismatch; its return value is not needed.
        self._get_version(request)
        self._check_signature(request, consumer, token)
        parameters = request.get_nonoauth_parameters()
        return parameters

    def build_authenticate_header(self, realm=''):
        """Optional support for the authenticate header."""
        return {'WWW-Authenticate': 'OAuth realm="%s"' % realm}

    def _get_version(self, request):
        """Verify the correct version request for this server."""
        try:
            version = request.get_parameter('oauth_version')
        except Exception:
            # Parameter absent: oauth_version is optional and defaults to
            # the protocol version.
            version = VERSION
        if version and version != self.version:
            raise Error('OAuth version %s not supported.' % str(version))
        return version

    def _get_signature_method(self, request):
        """Figure out the signature with some defaults."""
        try:
            signature_method = request.get_parameter('oauth_signature_method')
        except Exception:
            signature_method = SIGNATURE_METHOD
        try:
            # Get the signature method object.
            signature_method = self.signature_methods[signature_method]
        except KeyError:
            signature_method_names = ', '.join(self.signature_methods.keys())
            raise Error('Signature method %s not supported try one of the following: %s' % (signature_method, signature_method_names))
        return signature_method

    def _get_verifier(self, request):
        return request.get_parameter('oauth_verifier')

    def _check_signature(self, request, consumer, token):
        """Validate the request's oauth_signature; raises on failure."""
        timestamp, nonce = request._get_timestamp_nonce()
        self._check_timestamp(timestamp)
        signature_method = self._get_signature_method(request)
        try:
            signature = request.get_parameter('oauth_signature')
        except Exception:
            raise MissingSignature('Missing oauth_signature.')
        # Validate the signature.
        valid = signature_method.check(request, consumer, token, signature)
        if not valid:
            key, base = signature_method.signing_base(request, consumer, token)
            raise Error('Invalid signature. Expected signature base '
                'string: %s' % base)
        # (A dead re-signing call that discarded its result was removed here.)

    def _check_timestamp(self, timestamp):
        """Verify that timestamp is recentish."""
        timestamp = int(timestamp)
        now = int(time.time())
        lapsed = now - timestamp
        if lapsed > self.timestamp_threshold:
            raise Error('Expired timestamp: given %d and now %s has a '
                'greater difference than threshold %d' % (timestamp, now,
                self.timestamp_threshold))
class SignatureMethod(object):
    """A way of signing requests.

    The OAuth protocol lets consumers and service providers pick a way to
    sign requests. This interface shows the methods expected by the other
    `oauth` modules for signing requests. Subclass it and implement its
    methods to provide a new way to sign requests.
    """

    def signing_base(self, request, consumer, token):
        """Calculates the string that needs to be signed.

        Returns a 2-tuple of the starting key for the signing and the
        message to be signed. The latter may be used in error messages to
        help clients debug their software.
        """
        raise NotImplementedError

    def sign(self, request, consumer, token):
        """Returns the signature for the given request, based on the
        consumer and token also provided.

        You should use your implementation of `signing_base()` to build the
        message to sign. Otherwise it may be less useful for debugging.
        """
        raise NotImplementedError

    def check(self, request, consumer, token, signature):
        """Returns whether the given signature is the correct signature for
        the given consumer and token signing the given request."""
        expected = self.sign(request, consumer, token)
        return expected == signature
class SignatureMethod_HMAC_SHA1(SignatureMethod):
    name = 'HMAC-SHA1'

    def signing_base(self, request, consumer, token):
        """Return the (signing key, signature base string) pair."""
        if request.normalized_url is None:
            raise ValueError("Base URL for request is not set.")
        # Signature base string: method, URL and normalized parameters,
        # individually escaped and joined with '&'.
        raw = '&'.join((
            escape(request.method),
            escape(request.normalized_url),
            escape(request.get_normalized_parameters()),
        ))
        # Key: consumer secret '&' token secret (token part empty if absent).
        key = '%s&' % escape(consumer.secret)
        if token:
            key = key + escape(token.secret)
        return key, raw

    def sign(self, request, consumer, token):
        """Builds the base signature string."""
        key, raw = self.signing_base(request, consumer, token)
        # HMAC object.
        try:
            from hashlib import sha1 as sha
        except ImportError:
            import sha # Deprecated
        hashed = hmac.new(key, raw, sha)
        # Calculate the digest base 64, dropping the trailing newline that
        # b2a_base64 appends.
        return binascii.b2a_base64(hashed.digest())[:-1]
class SignatureMethod_PLAINTEXT(SignatureMethod):
    name = 'PLAINTEXT'

    def signing_base(self, request, consumer, token):
        """Concatenates the consumer key and secret with the token's
        secret."""
        # PLAINTEXT transmits the signing key itself, so the "base" and the
        # key are the same string.
        secret = '%s&' % escape(consumer.secret)
        if token:
            secret += escape(token.secret)
        return secret, secret

    def sign(self, request, consumer, token):
        """The PLAINTEXT signature is simply the signing key."""
        key, raw = self.signing_base(request, consumer, token)
        return raw
| Python |
"""
The MIT License
Copyright (c) 2007-2010 Leah Culver, Joe Stump, Mark Paschal, Vic Fryzel
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import oauth2
import imaplib
class IMAP4_SSL(imaplib.IMAP4_SSL):
    """IMAP wrapper for imaplib.IMAP4_SSL that implements XOAUTH."""

    def authenticate(self, url, consumer, token):
        """Authenticate to the IMAP server with the XOAUTH mechanism.

        Args:
            url: string, the XOAUTH request URL.
            consumer: oauth2.Consumer, the OAuth consumer.
            token: oauth2.Token, the OAuth access token.

        Raises:
            ValueError: if consumer or token has the wrong type.
        """
        if consumer is not None and not isinstance(consumer, oauth2.Consumer):
            raise ValueError("Invalid consumer.")
        if token is not None and not isinstance(token, oauth2.Token):
            raise ValueError("Invalid token.")
        # The authobject ignores the server challenge and always responds
        # with the signed XOAUTH string.
        xoauth = lambda challenge: oauth2.build_xoauth_string(url, consumer, token)
        imaplib.IMAP4_SSL.authenticate(self, 'XOAUTH', xoauth)
| Python |
"""
The MIT License
Copyright (c) 2007-2010 Leah Culver, Joe Stump, Mark Paschal, Vic Fryzel
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import oauth2
import smtplib
import base64
class SMTP(smtplib.SMTP):
    """SMTP wrapper for smtplib.SMTP that implements XOAUTH."""

    def authenticate(self, url, consumer, token):
        """Authenticate to the SMTP server with the XOAUTH mechanism.

        Args:
            url: string, the XOAUTH request URL.
            consumer: oauth2.Consumer, the OAuth consumer.
            token: oauth2.Token, the OAuth access token.

        Raises:
            ValueError: if consumer or token has the wrong type.
        """
        if consumer is not None and not isinstance(consumer, oauth2.Consumer):
            raise ValueError("Invalid consumer.")
        if token is not None and not isinstance(token, oauth2.Token):
            raise ValueError("Invalid token.")
        xoauth_string = oauth2.build_xoauth_string(url, consumer, token)
        self.docmd('AUTH', 'XOAUTH %s' % base64.b64encode(xoauth_string))
| Python |
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command-line tools for authenticating via OAuth 2.0
Do the OAuth 2.0 Web Server dance for a command line application. Stores the
generated credentials in a common file that is used by other example apps in
the same directory.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
__all__ = ['run']
import BaseHTTPServer
import gflags
import logging
import socket
import sys
from optparse import OptionParser
from client import FlowExchangeError
try:
from urlparse import parse_qsl
except ImportError:
from cgi import parse_qsl
# Global gflags registry; values are populated from argv by gflags at startup.
FLAGS = gflags.FLAGS
# Whether to receive the OAuth redirect via a local web server (True) or to
# fall back to manual out-of-band copy/paste of the verification code.
gflags.DEFINE_boolean('auth_local_webserver', True,
                     ('Run a local web server to handle redirects during '
                     'OAuth authorization.'))
# Host the local redirect server binds to; must match the registered
# redirect URI host.
gflags.DEFINE_string('auth_host_name', 'localhost',
                    ('Host name to use when running a local web server to '
                    'handle redirects during OAuth authorization.'))
# Candidate ports tried in order; run() falls back to 'oob' if none binds.
gflags.DEFINE_multi_int('auth_host_port', [8080, 8090],
                       ('Port to use when running a local web server to '
                       'handle redirects during OAuth authorization.'))
class ClientRedirectServer(BaseHTTPServer.HTTPServer):
  """A server to handle OAuth 2.0 redirects back to localhost.

  Waits for a single request and parses the query parameters
  into query_params and then stops serving.
  """
  # Class-level default; ClientRedirectHandler.do_GET replaces this with an
  # instance attribute holding the parsed redirect query parameters.
  # NOTE(review): run() in this file instantiates BaseHTTPServer.HTTPServer
  # directly, so this subclass appears unused — confirm before removing.
  query_params = {}
class ClientRedirectHandler(BaseHTTPServer.BaseHTTPRequestHandler):
  """A handler for OAuth 2.0 redirects back to localhost.

  Waits for a single request and parses the query parameters
  into the servers query_params and then stops serving.
  """

  def do_GET(self):
    """Handle a GET request.

    Parses the query parameters and prints a message
    if the flow has completed. Note that we can't detect
    if an error occurred.
    """
    self.send_response(200)
    self.send_header("Content-type", "text/html")
    self.end_headers()
    # Everything after the first '?' is the query string; stash the parsed
    # parameters on the server object for run() to pick up.
    query_string = self.path.split('?', 1)[-1]
    self.server.query_params = dict(parse_qsl(query_string))
    self.wfile.write("<html><head><title>Authentication Status</title></head>")
    self.wfile.write("<body><p>The authentication flow has completed.</p>")
    self.wfile.write("</body></html>")

  def log_message(self, format, *args):
    """Do not log messages to stdout while running as command line program."""
    pass
def run(flow, storage):
  """Core code for a command-line application.

  Args:
    flow: Flow, an OAuth 2.0 Flow to step through.
    storage: Storage, a Storage to store the credential in.

  Returns:
    Credentials, the obtained credential.

  Side effects:
    May mutate FLAGS.auth_local_webserver (downgraded to False when no
    candidate port can be bound), prints instructions to stdout, and exits
    the process on rejection or exchange failure.
  """
  if FLAGS.auth_local_webserver:
    # Try each configured port in order; remember whether any of them bound.
    success = False
    port_number = 0
    for port in FLAGS.auth_host_port:
      port_number = port
      try:
        httpd = BaseHTTPServer.HTTPServer((FLAGS.auth_host_name, port),
            ClientRedirectHandler)
      except socket.error, e:
        # Port unavailable: try the next candidate.
        pass
      else:
        success = True
        break
    FLAGS.auth_local_webserver = success
  if FLAGS.auth_local_webserver:
    oauth_callback = 'http://%s:%s/' % (FLAGS.auth_host_name, port_number)
  else:
    # Out-of-band: the provider shows a code for the user to paste back.
    oauth_callback = 'oob'
  authorize_url = flow.step1_get_authorize_url(oauth_callback)
  print 'Go to the following link in your browser:'
  print authorize_url
  print
  if FLAGS.auth_local_webserver:
    print 'If your browser is on a different machine then exit and re-run this'
    print 'application with the command-line parameter --noauth_local_webserver.'
    print
  if FLAGS.auth_local_webserver:
    # Block until the provider redirects the browser back to localhost.
    httpd.handle_request()
    if 'error' in httpd.query_params:
      sys.exit('Authentication request was rejected.')
    if 'code' in httpd.query_params:
      code = httpd.query_params['code']
      # NOTE(review): if the redirect carries neither 'error' nor 'code',
      # `code` is unbound when step2_exchange is reached — confirm intended.
  else:
    accepted = 'n'
    while accepted.lower() == 'n':
      accepted = raw_input('Have you authorized me? (y/n) ')
    code = raw_input('What is the verification code? ').strip()
  try:
    credentials = flow.step2_exchange(code)
  except FlowExchangeError:
    sys.exit('The authentication has failed.')
  # Persist, and wire the storage back in so refreshed tokens auto-save.
  storage.put(credentials)
  credentials.set_store(storage.put)
  print "You have successfully authenticated."
  return credentials
| Python |
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An OAuth 2.0 client
Tools for interacting with OAuth 2.0 protected
resources.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import copy
import datetime
import httplib2
import logging
import urllib
import urlparse
try: # pragma: no cover
import simplejson
except ImportError: # pragma: no cover
try:
# Try to import from django, should work on App Engine
from django.utils import simplejson
except ImportError:
# Should work for Python2.6 and higher.
import json as simplejson
try:
from urlparse import parse_qsl
except ImportError:
from cgi import parse_qsl
class Error(Exception):
  """Base error for this module."""
class FlowExchangeError(Error):
  """Error trying to exchange an authorization grant for an access token."""
class AccessTokenRefreshError(Error):
  """Error trying to refresh an expired access token."""
class AccessTokenCredentialsError(Error):
  """Having only the access_token means no refresh is possible."""
def _abstract():
raise NotImplementedError('You need to override this function')
class Credentials(object):
  """Base class for all Credentials objects.

  Subclasses must define an authorize() method that applies the
  credentials to an HTTP transport.
  """

  def authorize(self, http):
    """Authorize an httplib2.Http instance (or equivalent).

    Usually implemented by replacing http.request() with a wrapper that
    injects the appropriate Authorization headers before delegating to
    the original Http.request() method.
    """
    _abstract()
class Flow(object):
  """Base class for all Flow objects."""
class Storage(object):
  """Base class for all Storage objects.

  Store and retrieve a single credential.
  """

  def get(self):
    """Retrieve credential.

    Returns:
      oauth2client.client.Credentials
    """
    _abstract()

  def put(self, credentials):
    """Write a credential.

    Args:
      credentials: Credentials, the credentials to store.
    """
    _abstract()
class OAuth2Credentials(Credentials):
  """Credentials object for OAuth 2.0

  Credentials can be applied to an httplib2.Http object using the authorize()
  method, which then signs each request from that object with the OAuth 2.0
  access token.

  OAuth2Credentials objects may be safely pickled and unpickled.
  """

  def __init__(self, access_token, client_id, client_secret, refresh_token,
      token_expiry, token_uri, user_agent):
    """Create an instance of OAuth2Credentials

    This constructor is not usually called by the user, instead
    OAuth2Credentials objects are instantiated by the OAuth2WebServerFlow.

    Args:
      access_token: string, access token.
      client_id: string, client identifier.
      client_secret: string, client secret.
      refresh_token: string, refresh token.
      token_expiry: datetime, when the access_token expires.
      token_uri: string, URI of token endpoint.
      user_agent: string, The HTTP User-Agent to provide for this application.

    Notes:
      store: callable, a callable that when passed a Credential
        will store the credential back to where it came from.
        This is needed to store the latest access_token if it
        has expired and been refreshed.
    """
    self.access_token = access_token
    self.client_id = client_id
    self.client_secret = client_secret
    self.refresh_token = refresh_token
    self.store = None
    self.token_expiry = token_expiry
    self.token_uri = token_uri
    self.user_agent = user_agent
    # True if the credentials have been revoked or expired and can't be
    # refreshed.
    self._invalid = False

  @property
  def invalid(self):
    """True if the credentials are invalid, such as being revoked."""
    # getattr with a default keeps instances unpickled from older versions
    # (which lacked _invalid) working.
    return getattr(self, '_invalid', False)

  def set_store(self, store):
    """Set the storage for the credential.

    Args:
      store: callable, a callable that when passed a Credential
        will store the credential back to where it came from.
        This is needed to store the latest access_token if it
        has expired and been refreshed.
    """
    self.store = store

  def __getstate__(self):
    """Trim the state down to something that can be pickled."""
    d = copy.copy(self.__dict__)
    # The store callable is not picklable; callers re-attach it after
    # unpickling via set_store().
    del d['store']
    return d

  def __setstate__(self, state):
    """Reconstitute the state of the object from being pickled."""
    self.__dict__.update(state)
    self.store = None

  def _refresh(self, http_request):
    """Refresh the access_token using the refresh_token.

    Args:
      http_request: callable, the request method of an httplib2.Http
        instance or something that acts like it.

    Raises:
      AccessTokenRefreshError: if the refresh fails. When the server's
        response body names an error, the credentials are also flagged
        invalid and stored (if a store is set).
    """
    body = urllib.urlencode({
        'grant_type': 'refresh_token',
        'client_id': self.client_id,
        'client_secret': self.client_secret,
        'refresh_token' : self.refresh_token
        })
    headers = {
        'user-agent': self.user_agent,
        'content-type': 'application/x-www-form-urlencoded'
    }
    # (Typo "Refresing" fixed; lazy %-args avoid formatting when disabled.)
    logging.info("Refreshing access_token")
    resp, content = http_request(
        self.token_uri, method='POST', body=body, headers=headers)
    if resp.status == 200:
      # TODO(jcgregorio) Raise an error if loads fails?
      d = simplejson.loads(content)
      self.access_token = d['access_token']
      self.refresh_token = d.get('refresh_token', self.refresh_token)
      if 'expires_in' in d:
        self.token_expiry = datetime.timedelta(
            seconds = int(d['expires_in'])) + datetime.datetime.now()
      else:
        self.token_expiry = None
      if self.store is not None:
        self.store(self)
    else:
      # An {'error':...} response body means the token is expired or revoked,
      # so we flag the credentials as such.
      logging.error('Failed to retrieve access token: %s', content)
      error_msg = 'Invalid response %s.' % resp['status']
      try:
        d = simplejson.loads(content)
        if 'error' in d:
          error_msg = d['error']
          self._invalid = True
          if self.store is not None:
            self.store(self)
          else:
            logging.warning("Unable to store refreshed credentials, no Storage provided.")
      except Exception:
        # Body was not JSON; fall through with the generic message.
        pass
      raise AccessTokenRefreshError(error_msg)

  def authorize(self, http):
    """Authorize an httplib2.Http instance with these credentials.

    Args:
      http: An instance of httplib2.Http
        or something that acts like it.

    Returns:
      A modified instance of http that was passed in.

    Example:
      h = httplib2.Http()
      h = credentials.authorize(h)

    You can't create a new OAuth subclass of httplib2.Authentication because
    it never gets passed the absolute URI, which is needed for signing. So
    instead we have to overload 'request' with a closure that adds in the
    Authorization header and then calls the original version of 'request()'.
    """
    request_orig = http.request
    # The closure that will replace 'httplib2.Http.request'.
    def new_request(uri, method='GET', body=None, headers=None,
                    redirections=httplib2.DEFAULT_MAX_REDIRECTS,
                    connection_type=None):
      """Modify the request headers to add the appropriate
      Authorization header."""
      if headers is None:
        headers = {}
      headers['authorization'] = 'OAuth ' + self.access_token
      if 'user-agent' in headers:
        headers['user-agent'] = self.user_agent + ' ' + headers['user-agent']
      else:
        headers['user-agent'] = self.user_agent
      resp, content = request_orig(uri, method, body, headers,
                                   redirections, connection_type)
      if resp.status == 401:
        # The token may simply have expired: refresh once and retry.
        logging.info("Refreshing because we got a 401")
        self._refresh(request_orig)
        headers['authorization'] = 'OAuth ' + self.access_token
        return request_orig(uri, method, body, headers,
                            redirections, connection_type)
      else:
        return (resp, content)
    http.request = new_request
    return http
class AccessTokenCredentials(OAuth2Credentials):
  """Credentials object for OAuth 2.0

  Credentials can be applied to an httplib2.Http object using the authorize()
  method, which then signs each request from that object with the OAuth 2.0
  access token. This set of credentials is for the use case where you have
  acquired an OAuth 2.0 access_token from another place such as a JavaScript
  client or another web application, and wish to use it from Python. Because
  only the access_token is present it can not be refreshed and will in time
  expire.

  AccessTokenCredentials objects may be safely pickled and unpickled.

  Usage:
    credentials = AccessTokenCredentials('<an access token>',
      'my-user-agent/1.0')
    http = httplib2.Http()
    http = credentials.authorize(http)

  Exceptions:
    AccessTokenCredentialsError: raised when a refresh is attempted, since
      an access_token alone cannot be refreshed or renewed.
  """

  def __init__(self, access_token, user_agent):
    """Create an instance of AccessTokenCredentials

    This is one of the few types of Credentials that you should construct
    directly; Credentials objects are usually instantiated by a Flow.

    Args:
      access_token: string, access token.
      user_agent: string, The HTTP User-Agent to provide for this application.

    Notes:
      store: callable, a callable that when passed a Credential
        will store the credential back to where it came from.
    """
    # All refresh-related fields are None: there is no client id/secret or
    # refresh token, so this credential can never be renewed.
    super(AccessTokenCredentials, self).__init__(
        access_token,
        None,
        None,
        None,
        None,
        None,
        user_agent)

  def _refresh(self, http_request):
    """Refreshing is impossible with only an access token; always raises."""
    raise AccessTokenCredentialsError(
        "The access_token is expired or invalid and can't be refreshed.")
class OAuth2WebServerFlow(Flow):
  """Does the Web Server Flow for OAuth 2.0.

  OAuth2Credentials objects may be safely pickled and unpickled.
  """

  def __init__(self, client_id, client_secret, scope, user_agent,
      auth_uri='https://accounts.google.com/o/oauth2/auth',
      token_uri='https://accounts.google.com/o/oauth2/token',
      **kwargs):
    """Constructor for OAuth2WebServerFlow

    Args:
      client_id: string, client identifier.
      client_secret: string client secret.
      scope: string, scope of the credentials being requested.
      user_agent: string, HTTP User-Agent to provide for this application.
      auth_uri: string, URI for authorization endpoint. For convenience
        defaults to Google's endpoints but any OAuth 2.0 provider can be used.
      token_uri: string, URI for token endpoint. For convenience
        defaults to Google's endpoints but any OAuth 2.0 provider can be used.
      **kwargs: dict, The keyword arguments are all optional and required
        parameters for the OAuth calls.
    """
    self.client_id = client_id
    self.client_secret = client_secret
    self.scope = scope
    self.user_agent = user_agent
    self.auth_uri = auth_uri
    self.token_uri = token_uri
    self.params = kwargs
    # Set by step1_get_authorize_url(); step2_exchange() must send the same
    # value back to the token endpoint.
    self.redirect_uri = None

  def step1_get_authorize_url(self, redirect_uri='oob'):
    """Returns a URI to redirect to the provider.

    Args:
      redirect_uri: string, Either the string 'oob' for a non-web-based
        application, or a URI that handles the callback from
        the authorization server.

    If redirect_uri is 'oob' then pass in the
    generated verification code to step2_exchange,
    otherwise pass in the query parameters received
    at the callback uri to step2_exchange.
    """
    self.redirect_uri = redirect_uri
    query = {
      'response_type': 'code',
      'client_id': self.client_id,
      'redirect_uri': redirect_uri,
      'scope': self.scope,
      }
    # Extra constructor kwargs (e.g. access_type) ride along as additional
    # authorization query parameters.
    query.update(self.params)
    parts = list(urlparse.urlparse(self.auth_uri))
    query.update(dict(parse_qsl(parts[4]))) # 4 is the index of the query part
    parts[4] = urllib.urlencode(query)
    return urlparse.urlunparse(parts)

  def step2_exchange(self, code, http=None):
    """Exchanges a code for OAuth2Credentials.

    Args:
      code: string or dict, either the code as a string, or a dictionary
        of the query parameters to the redirect_uri, which contains
        the code.
      http: httplib2.Http, optional http instance to use to do the fetch

    Returns:
      OAuth2Credentials on success.

    Raises:
      FlowExchangeError: if the token endpoint rejects the exchange.
    """
    if not (isinstance(code, str) or isinstance(code, unicode)):
      # A dict of redirect query parameters was passed; extract the code.
      code = code['code']
    body = urllib.urlencode({
      'grant_type': 'authorization_code',
      'client_id': self.client_id,
      'client_secret': self.client_secret,
      'code': code,
      'redirect_uri': self.redirect_uri,
      'scope': self.scope
      })
    headers = {
      'user-agent': self.user_agent,
      'content-type': 'application/x-www-form-urlencoded'
    }
    if http is None:
      http = httplib2.Http()
    resp, content = http.request(self.token_uri, method='POST', body=body, headers=headers)
    if resp.status == 200:
      # TODO(jcgregorio) Raise an error if simplejson.loads fails?
      d = simplejson.loads(content)
      access_token = d['access_token']
      refresh_token = d.get('refresh_token', None)
      token_expiry = None
      if 'expires_in' in d:
        token_expiry = datetime.datetime.now() + datetime.timedelta(seconds = int(d['expires_in']))
      # NOTE(review): this logs the raw token response (a secret) at INFO
      # level — confirm that is acceptable.
      logging.info('Successfully retrieved access token: %s' % content)
      return OAuth2Credentials(access_token, self.client_id, self.client_secret,
                               refresh_token, token_expiry, self.token_uri,
                               self.user_agent)
    else:
      logging.error('Failed to retrieve access token: %s' % content)
      error_msg = 'Invalid response %s.' % resp['status']
      try:
        # Prefer the server-supplied error message when the body is JSON.
        d = simplejson.loads(content)
        if 'error' in d:
          error_msg = d['error']
      except:
        pass
      raise FlowExchangeError(error_msg)
| Python |
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for OAuth.
Utilities for making it easier to work with OAuth 2.0
credentials.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import pickle
import threading
from client import Storage as BaseStorage
class Storage(BaseStorage):
  """Store and retrieve a single credential to and from a file."""

  def __init__(self, filename):
    """Args:
      filename: string, path of the file holding the pickled credential.
    """
    self._filename = filename
    self._lock = threading.Lock()

  def get(self):
    """Retrieve Credential from file.

    Returns:
      oauth2client.client.Credentials, or None if the file is missing,
      unreadable, or does not contain a credential.
    """
    # 'with' guarantees the lock is released even if loading raises.
    with self._lock:
      try:
        # Binary mode: pickle data is a byte stream, not text.
        f = open(self._filename, 'rb')
        try:
          credentials = pickle.loads(f.read())
        finally:
          f.close()
        # Wire the storage back in so refreshed tokens auto-save.
        credentials.set_store(self.put)
      except Exception:
        # Best-effort semantics: any failure reads as "no credential yet".
        credentials = None
    return credentials

  def put(self, credentials):
    """Write a pickled Credentials to file.

    Args:
      credentials: Credentials, the credentials to store.
    """
    # Lock and file handle are both released on error (the original leaked
    # the lock and the handle if open/write raised).
    with self._lock:
      f = open(self._filename, 'wb')
      try:
        f.write(pickle.dumps(credentials))
      finally:
        f.close()
| Python |
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""OAuth 2.0 utilities for Django.
Utilities for using OAuth 2.0 in conjunction with
the Django datastore.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import oauth2client
import base64
import pickle
from django.db import models
from oauth2client.client import Storage as BaseStorage
class CredentialsField(models.Field):
    # SubfieldBase makes to_python() run whenever the attribute is assigned.
    __metaclass__ = models.SubfieldBase

    def db_type(self, connection=None):
        """Credentials are stored pickled + base64-encoded in a VARCHAR."""
        return 'VARCHAR'

    def to_python(self, value):
        """Decode a database value back into a Credentials object."""
        if not value:
            return None
        if isinstance(value, oauth2client.client.Credentials):
            # Already deserialized (e.g. assigned directly by application code).
            return value
        pickled = base64.b64decode(value)
        return pickle.loads(pickled)

    def get_db_prep_value(self, value):
        """Serialize a Credentials object for database storage."""
        pickled = pickle.dumps(value)
        return base64.b64encode(pickled)
class FlowField(models.Field):
    # SubfieldBase makes to_python() run whenever the attribute is assigned.
    __metaclass__ = models.SubfieldBase

    def db_type(self, connection=None):
        """Flows are stored pickled + base64-encoded in a VARCHAR."""
        return 'VARCHAR'

    def to_python(self, value):
        """Decode a database value back into a Flow object."""
        if value is None:
            return None
        if isinstance(value, oauth2client.client.Flow):
            # Already deserialized (e.g. assigned directly by application code).
            return value
        pickled = base64.b64decode(value)
        return pickle.loads(pickled)

    def get_db_prep_value(self, value):
        """Serialize a Flow object for database storage."""
        pickled = pickle.dumps(value)
        return base64.b64encode(pickled)
class Storage(BaseStorage):
    """Store and retrieve a single credential to and from
    the datastore.

    This Storage helper presumes the Credentials
    have been stored as a CredentialsField
    on a db model class.
    """

    def __init__(self, model_class, key_name, key_value, property_name):
        """Constructor for Storage.

        Args:
            model_class: db.Model, model class
            key_name: string, key name for the entity that has the credentials
            key_value: string, key value for the entity that has the credentials
            property_name: string, name of the property that is an CredentialsProperty
        """
        self.model_class = model_class
        self.key_name = key_name
        self.key_value = key_value
        self.property_name = property_name

    def get(self):
        """Retrieve Credential from datastore.

        Returns:
            oauth2client.Credentials, or None when no matching entity exists.
        """
        credential = None
        # Filter on the single <key_name> == key_value pair.
        query = {self.key_name: self.key_value}
        entities = self.model_class.objects.filter(**query)
        if len(entities) > 0:
            credential = getattr(entities[0], self.property_name)
            if credential and hasattr(credential, 'set_store'):
                # Wire the storage back in so refreshed tokens auto-save.
                credential.set_store(self.put)
        return credential

    def put(self, credentials):
        """Write a Credentials to the datastore.

        Args:
            credentials: Credentials, the credentials to store.

        NOTE(review): this always constructs a fresh model instance; unless
        <key_name> is the model's primary key, save() may insert a duplicate
        row rather than update the existing one — confirm intended.
        """
        args = {self.key_name: self.key_value}
        entity = self.model_class(**args)
        setattr(entity, self.property_name, credentials)
        entity.save()
| Python |
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for Google App Engine
Utilities for making it easier to use OAuth 2.0 on Google App Engine.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import httplib2
import pickle
from client import AccessTokenRefreshError
from client import Credentials
from client import Flow
from client import OAuth2WebServerFlow
from client import Storage
from google.appengine.api import memcache
from google.appengine.api import users
from google.appengine.ext import db
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import login_required
from google.appengine.ext.webapp.util import run_wsgi_app
OAUTH2CLIENT_NAMESPACE = 'oauth2client#ns'
class FlowProperty(db.Property):
  """App Engine datastore Property for Flow.

  Utility property that allows easy storage and retrieval of an
  oauth2client.Flow, pickled into a Blob."""

  # Tell the datastore what the user-visible type is.
  data_type = Flow

  def get_value_for_datastore(self, model_instance):
    """Serialize the Flow into a pickled Blob for storage."""
    flow = super(FlowProperty,
                 self).get_value_for_datastore(model_instance)
    return db.Blob(pickle.dumps(flow))

  def make_value_from_datastore(self, value):
    """Unpickle a stored Blob back into a Flow (None passes through)."""
    if value is None:
      return None
    return pickle.loads(value)

  def validate(self, value):
    """Reject any value that is neither None nor a Flow instance."""
    if value is not None and not isinstance(value, Flow):
      # BUG FIX: BadValueError is not a name in this module; the unqualified
      # reference raised NameError instead of the intended validation error.
      # It is exposed by the db module as db.BadValueError.
      raise db.BadValueError('Property %s must be convertible '
                             'to a FlowThreeLegged instance (%s)' %
                             (self.name, value))
    return super(FlowProperty, self).validate(value)

  def empty(self, value):
    """Treat any falsy value as empty."""
    return not value
class CredentialsProperty(db.Property):
  """App Engine datastore Property for Credentials.

  Utility property that allows easy storage and retrieval of
  oauth2client.Credentials, pickled into a Blob.
  """

  # Tell the datastore what the user-visible type is.
  data_type = Credentials

  def get_value_for_datastore(self, model_instance):
    """Serialize the Credentials into a pickled Blob for storage."""
    cred = super(CredentialsProperty,
                 self).get_value_for_datastore(model_instance)
    return db.Blob(pickle.dumps(cred))

  def make_value_from_datastore(self, value):
    """Unpickle a stored Blob back into Credentials (None passes through)."""
    if value is None:
      return None
    return pickle.loads(value)

  def validate(self, value):
    """Reject any value that is neither None nor a Credentials instance."""
    if value is not None and not isinstance(value, Credentials):
      # BUG FIX: BadValueError is not a name in this module; the unqualified
      # reference raised NameError instead of the intended validation error.
      # It is exposed by the db module as db.BadValueError.
      raise db.BadValueError('Property %s must be convertible '
                             'to an Credentials instance (%s)' %
                             (self.name, value))
    return super(CredentialsProperty, self).validate(value)

  def empty(self, value):
    """Treat any falsy value as empty."""
    return not value
class StorageByKeyName(Storage):
  """Store and retrieve a single credential to and from
  the App Engine datastore.

  This Storage helper presumes the Credentials
  have been stored as a CredentialsProperty
  on a datastore model class, and that entities
  are stored by key_name.
  """

  def __init__(self, model, key_name, property_name, cache=None):
    """Constructor for Storage.

    Args:
      model: db.Model, model class
      key_name: string, key name for the entity that has the credentials
      property_name: string, name of the property that is a
        CredentialsProperty
      cache: memcache, a write-through cache to put in front of the datastore
    """
    self._model = model
    self._key_name = key_name
    self._property_name = property_name
    self._cache = cache

  def get(self):
    """Retrieve Credential from datastore.

    Returns:
      oauth2client.Credentials, or None if none are stored.
    """
    if self._cache:
      # Fast path: serve from the write-through cache when possible.
      credential = self._cache.get(self._key_name)
      if credential:
        return pickle.loads(credential)
    entity = self._model.get_or_insert(self._key_name)
    credential = getattr(entity, self._property_name)
    if credential and hasattr(credential, 'set_store'):
      # Let the credentials write themselves back here on refresh.
      credential.set_store(self.put)
      if self._cache:
        # BUG FIX: the original pickled the undefined name 'credentials',
        # raising NameError whenever a cache was configured.
        self._cache.set(self._key_name, pickle.dumps(credential))
    return credential

  def put(self, credentials):
    """Write a Credentials to the datastore.

    Args:
      credentials: Credentials, the credentials to store.
    """
    entity = self._model.get_or_insert(self._key_name)
    setattr(entity, self._property_name, credentials)
    entity.put()
    if self._cache:
      # Keep the write-through cache in sync with the datastore.
      self._cache.set(self._key_name, pickle.dumps(credentials))
class CredentialsModel(db.Model):
  """Storage for OAuth 2.0 Credentials

  Storage of the model is keyed by the user.user_id().
  """
  # Pickled oauth2client.Credentials blob; see CredentialsProperty for the
  # (de)serialization logic.
  credentials = CredentialsProperty()
class OAuth2Decorator(object):
  """Utility for making OAuth 2.0 easier.

  Instantiate and then use with oauth_required or oauth_aware
  as decorators on webapp.RequestHandler methods.

  Example:

    decorator = OAuth2Decorator(
        client_id='837...ent.com',
        client_secret='Qh...wwI',
        scope='https://www.googleapis.com/auth/buzz',
        user_agent='my-sample-app/1.0')

    class MainHandler(webapp.RequestHandler):

      @decorator.oauth_required
      def get(self):
        http = decorator.http()
        # http is authorized with the user's Credentials and can be used
        # in API calls

  NOTE(review): the decorated handlers store per-request state
  (self.credentials, self._request_handler) on this shared decorator
  instance; that is only safe if requests are handled one at a time --
  confirm against the runtime's concurrency model.
  """

  def __init__(self, client_id, client_secret, scope, user_agent,
               auth_uri='https://accounts.google.com/o/oauth2/auth',
               token_uri='https://accounts.google.com/o/oauth2/token'):
    """Constructor for OAuth2Decorator

    Args:
      client_id: string, client identifier.
      client_secret: string client secret.
      scope: string, scope of the credentials being requested.
      user_agent: string, HTTP User-Agent to provide for this application.
      auth_uri: string, URI for authorization endpoint. For convenience
        defaults to Google's endpoints but any OAuth 2.0 provider can be used.
      token_uri: string, URI for token endpoint. For convenience
        defaults to Google's endpoints but any OAuth 2.0 provider can be used.
    """
    self.flow = OAuth2WebServerFlow(client_id, client_secret, scope, user_agent,
        auth_uri, token_uri)
    # Populated by the decorators below, per request.
    self.credentials = None
    self._request_handler = None

  def oauth_required(self, method):
    """Decorator that starts the OAuth 2.0 dance.

    Starts the OAuth dance for the logged in user if they haven't already
    granted access for this application.

    Args:
      method: callable, to be decorated method of a webapp.RequestHandler
        instance.
    """
    @login_required
    def check_oauth(request_handler, *args):
      # Store the request URI in 'state' so we can use it later
      self.flow.params['state'] = request_handler.request.url
      self._request_handler = request_handler
      user = users.get_current_user()
      self.credentials = StorageByKeyName(
          CredentialsModel, user.user_id(), 'credentials').get()
      if not self.has_credentials():
        # No (valid) stored credentials: send the user off to grant access.
        return request_handler.redirect(self.authorize_url())
      try:
        method(request_handler, *args)
      except AccessTokenRefreshError:
        # Stored credentials could not be refreshed; restart the dance.
        return request_handler.redirect(self.authorize_url())
    return check_oauth

  def oauth_aware(self, method):
    """Decorator that sets up for OAuth 2.0 dance, but doesn't do it.

    Does all the setup for the OAuth dance, but doesn't initiate it.
    This decorator is useful if you want to create a page that knows
    whether or not the user has granted access to this application.
    From within a method decorated with @oauth_aware the has_credentials()
    and authorize_url() methods can be called.

    Args:
      method: callable, to be decorated method of a webapp.RequestHandler
        instance.
    """
    @login_required
    def setup_oauth(request_handler, *args):
      self.flow.params['state'] = request_handler.request.url
      self._request_handler = request_handler
      user = users.get_current_user()
      self.credentials = StorageByKeyName(
          CredentialsModel, user.user_id(), 'credentials').get()
      # Unlike oauth_required, always fall through to the handler.
      method(request_handler, *args)
    return setup_oauth

  def has_credentials(self):
    """True if for the logged in user there are valid access Credentials.

    Must only be called from with a webapp.RequestHandler subclassed method
    that had been decorated with either @oauth_required or @oauth_aware.
    """
    return self.credentials is not None and not self.credentials.invalid

  def authorize_url(self):
    """Returns the URL to start the OAuth dance.

    Must only be called from with a webapp.RequestHandler subclassed method
    that had been decorated with either @oauth_required or @oauth_aware.
    """
    callback = self._request_handler.request.relative_url('/oauth2callback')
    url = self.flow.step1_get_authorize_url(callback)
    user = users.get_current_user()
    # Park the flow in memcache so OAuth2Handler can complete the exchange
    # when the provider redirects back.
    memcache.set(user.user_id(), pickle.dumps(self.flow),
        namespace=OAUTH2CLIENT_NAMESPACE)
    return url

  def http(self):
    """Returns an authorized http instance.

    Must only be called from within an @oauth_required decorated method, or
    from within an @oauth_aware decorated method where has_credentials()
    returns True.
    """
    return self.credentials.authorize(httplib2.Http())
class OAuth2Handler(webapp.RequestHandler):
  """Handler for the redirect_uri of the OAuth 2.0 dance."""

  @login_required
  def get(self):
    """Complete the token exchange and redirect back to the original page."""
    error = self.request.get('error')
    if error:
      errormsg = self.request.get('error_description', error)
      self.response.out.write('The authorization request failed: %s' % errormsg)
    else:
      user = users.get_current_user()
      # BUG FIX: memcache.get returns None on a miss/eviction, and
      # pickle.loads(None) raises TypeError before the 'if flow' guard could
      # run.  Only unpickle when something was actually found, so the guard
      # below is reachable for the missing/expired-flow case.
      pickled_flow = memcache.get(user.user_id(),
                                  namespace=OAUTH2CLIENT_NAMESPACE)
      flow = pickle.loads(pickled_flow) if pickled_flow is not None else None
      # This code should be amended with application specific error
      # handling. The following cases should be considered:
      # 1. What if the flow doesn't exist in memcache? Or is corrupt?
      # 2. What if the step2_exchange fails?
      if flow:
        credentials = flow.step2_exchange(self.request.params)
        StorageByKeyName(
            CredentialsModel, user.user_id(), 'credentials').put(credentials)
        self.redirect(self.request.get('state'))
      else:
        # TODO Add error handling here.
        pass
# WSGI application exposing only the OAuth 2.0 redirect handler.
application = webapp.WSGIApplication([('/oauth2callback', OAuth2Handler)])
def main():
  """Serve 'application' under App Engine's CGI runtime."""
  run_wsgi_app(application)
#
| Python |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tool for uploading diffs from a version control system to the codereview app.
Usage summary: upload.py [options] [-- diff_options] [path...]
Diff options are passed to the diff command of the underlying system.
Supported version control systems:
Git
Mercurial
Subversion
Perforce
CVS
It is important for Git/Mercurial users to specify a tree/node/branch to diff
against by using the '--rev' option.
"""
# This code is derived from appcfg.py in the App Engine SDK (open source),
# and from ASPN recipe #146306.
import ConfigParser
import cookielib
import fnmatch
import getpass
import logging
import marshal
import mimetypes
import optparse
import os
import re
import socket
import subprocess
import sys
import urllib
import urllib2
import urlparse
# The md5 module was deprecated in Python 2.5.
try:
from hashlib import md5
except ImportError:
from md5 import md5
try:
import readline
except ImportError:
pass
try:
import keyring
except ImportError:
keyring = None
# The logging verbosity:
# 0: Errors only.
# 1: Status messages.
# 2: Info logs.
# 3: Debug logs.
verbosity = 1
# The account type used for authentication.
# This line could be changed by the review server (see handler for
# upload.py).
AUTH_ACCOUNT_TYPE = "GOOGLE"
# URL of the default review server. As for AUTH_ACCOUNT_TYPE, this line could be
# changed by the review server (see handler for upload.py).
DEFAULT_REVIEW_SERVER = "codereview.appspot.com"
# Max size of patch or base file.
MAX_UPLOAD_SIZE = 900 * 1024
# Constants for version control names. Used by GuessVCSName.
VCS_GIT = "Git"
VCS_MERCURIAL = "Mercurial"
VCS_SUBVERSION = "Subversion"
VCS_PERFORCE = "Perforce"
VCS_CVS = "CVS"
VCS_UNKNOWN = "Unknown"
# whitelist for non-binary filetypes which do not start with "text/"
# .mm (Objective-C) shows up as application/x-freemind on my Linux box.
TEXT_MIMETYPES = ['application/javascript', 'application/json',
'application/x-javascript', 'application/xml',
'application/x-freemind', 'application/x-sh']
VCS_ABBREVIATIONS = {
VCS_MERCURIAL.lower(): VCS_MERCURIAL,
"hg": VCS_MERCURIAL,
VCS_SUBVERSION.lower(): VCS_SUBVERSION,
"svn": VCS_SUBVERSION,
VCS_PERFORCE.lower(): VCS_PERFORCE,
"p4": VCS_PERFORCE,
VCS_GIT.lower(): VCS_GIT,
VCS_CVS.lower(): VCS_CVS,
}
# The result of parsing Subversion's [auto-props] setting.
svn_auto_props_map = None
def GetEmail(prompt):
  """Prompts the user for their email address and returns it.

  The last used email address is saved to a file and offered up as a suggestion
  to the user. If the user presses enter without typing in anything the last
  used email address is used. If the user enters a new address, it is saved
  for next time we prompt.

  Args:
    prompt: string, the prompt shown to the user (suggestion is appended).

  Returns:
    The email address entered, or the cached one on empty input.
  """
  last_email_file_name = os.path.expanduser("~/.last_codereview_email_address")
  last_email = ""
  if os.path.exists(last_email_file_name):
    try:
      # BUG FIX: 'with' guarantees the handle is closed even when readline()
      # raises; the original leaked the descriptor on that path and bound an
      # unused exception name.
      with open(last_email_file_name, "r") as last_email_file:
        last_email = last_email_file.readline().strip("\n")
      prompt += " [%s]" % last_email
    except IOError:
      # Best effort only: an unreadable cache just means no suggestion.
      pass
  email = raw_input(prompt + ": ").strip()
  if email:
    try:
      with open(last_email_file_name, "w") as last_email_file:
        last_email_file.write(email)
    except IOError:
      # Failing to cache the address is not fatal.
      pass
  else:
    email = last_email
  return email
def StatusUpdate(msg):
  """Print a status message to stdout.

  If 'verbosity' is greater than 0, print the message.

  Args:
    msg: The string to print.
  """
  # 'verbosity' is the module-level setting (0=errors only, 1=status, ...).
  if verbosity > 0:
    print msg
def ErrorExit(msg):
  """Print an error message to stderr and exit."""
  print >>sys.stderr, msg
  # Non-zero status signals failure to any calling script.
  sys.exit(1)
class ClientLoginError(urllib2.HTTPError):
  """Raised to indicate there was an error authenticating with ClientLogin."""
  def __init__(self, url, code, msg, headers, args):
    # The fp argument is None: the response body has already been consumed
    # into 'args' by the caller.
    urllib2.HTTPError.__init__(self, url, code, msg, headers, None)
    # 'args' is the parsed key=value dict from the ClientLogin response body.
    # NOTE(review): this shadows BaseException.args (normally a tuple) with a
    # dict -- confirm nothing relies on the tuple convention.
    self.args = args
    # The server-supplied error code, e.g. "BadAuthentication".
    self.reason = args["Error"]
class AbstractRpcServer(object):
  """Provides a common interface for a simple RPC server."""

  def __init__(self, host, auth_function, host_override=None, extra_headers={},
               save_cookies=False, account_type=AUTH_ACCOUNT_TYPE):
    """Creates a new HttpRpcServer.

    Args:
      host: The host to send requests to.
      auth_function: A function that takes no arguments and returns an
        (email, password) tuple when called. Will be called if authentication
        is required.
      host_override: The host header to send to the server (defaults to host).
      extra_headers: A dict of extra headers to append to every request.
      save_cookies: If True, save the authentication cookies to local disk.
        If False, use an in-memory cookiejar instead. Subclasses must
        implement this functionality. Defaults to False.
      account_type: Account type used for authentication. Defaults to
        AUTH_ACCOUNT_TYPE.
    """
    # NOTE(review): extra_headers={} is a shared mutable default argument;
    # nothing here mutates it, but a None default would be safer.
    self.host = host
    if (not self.host.startswith("http://") and
        not self.host.startswith("https://")):
      # Bare host[:port] given: assume plain HTTP.
      self.host = "http://" + self.host
    self.host_override = host_override
    self.auth_function = auth_function
    self.authenticated = False
    self.extra_headers = extra_headers
    self.save_cookies = save_cookies
    self.account_type = account_type
    self.opener = self._GetOpener()
    if self.host_override:
      logging.info("Server: %s; Host: %s", self.host, self.host_override)
    else:
      logging.info("Server: %s", self.host)

  def _GetOpener(self):
    """Returns an OpenerDirector for making HTTP requests.

    Returns:
      A urllib2.OpenerDirector object.
    """
    raise NotImplementedError()

  def _CreateRequest(self, url, data=None):
    """Creates a new urllib request."""
    logging.debug("Creating request for: '%s' with payload:\n%s", url, data)
    req = urllib2.Request(url, data=data)
    if self.host_override:
      req.add_header("Host", self.host_override)
    for key, value in self.extra_headers.iteritems():
      req.add_header(key, value)
    return req

  def _GetAuthToken(self, email, password):
    """Uses ClientLogin to authenticate the user, returning an auth token.

    Args:
      email: The user's email address
      password: The user's password

    Raises:
      ClientLoginError: If there was an error authenticating with ClientLogin.
      HTTPError: If there was some other form of HTTP error.

    Returns:
      The authentication token returned by ClientLogin.
    """
    account_type = self.account_type
    if self.host.endswith(".google.com"):
      # Needed for use inside Google.
      account_type = "HOSTED"
    req = self._CreateRequest(
        url="https://www.google.com/accounts/ClientLogin",
        data=urllib.urlencode({
            "Email": email,
            "Passwd": password,
            "service": "ah",
            "source": "rietveld-codereview-upload",
            "accountType": account_type,
        }),
    )
    try:
      response = self.opener.open(req)
      response_body = response.read()
      # Response body is newline-separated key=value pairs; "Auth" holds
      # the token.
      response_dict = dict(x.split("=")
                           for x in response_body.split("\n") if x)
      return response_dict["Auth"]
    except urllib2.HTTPError, e:
      if e.code == 403:
        # 403 carries a parseable Error code; surface it as ClientLoginError.
        body = e.read()
        response_dict = dict(x.split("=", 1) for x in body.split("\n") if x)
        raise ClientLoginError(req.get_full_url(), e.code, e.msg,
                               e.headers, response_dict)
      else:
        raise

  def _GetAuthCookie(self, auth_token):
    """Fetches authentication cookies for an authentication token.

    Args:
      auth_token: The authentication token returned by ClientLogin.

    Raises:
      HTTPError: If there was an error fetching the authentication cookies.
    """
    # This is a dummy value to allow us to identify when we're successful.
    continue_location = "http://localhost/"
    args = {"continue": continue_location, "auth": auth_token}
    req = self._CreateRequest("%s/_ah/login?%s" %
                              (self.host, urllib.urlencode(args)))
    try:
      response = self.opener.open(req)
    except urllib2.HTTPError, e:
      # The opener ignores redirects (see subclass), so success arrives here
      # as an HTTPError carrying the 302.
      response = e
    if (response.code != 302 or
        response.info()["location"] != continue_location):
      raise urllib2.HTTPError(req.get_full_url(), response.code, response.msg,
                              response.headers, response.fp)
    self.authenticated = True

  def _Authenticate(self):
    """Authenticates the user.

    The authentication process works as follows:
     1) We get a username and password from the user
     2) We use ClientLogin to obtain an AUTH token for the user
        (see http://code.google.com/apis/accounts/AuthForInstalledApps.html).
     3) We pass the auth token to /_ah/login on the server to obtain an
        authentication cookie. If login was successful, it tries to redirect
        us to the URL we provided.

    If we attempt to access the upload API without first obtaining an
    authentication cookie, it returns a 401 response (or a 302) and
    directs us to authenticate ourselves with ClientLogin.
    """
    # Up to three attempts; only BadAuthentication retries, every other
    # ClientLogin error either breaks out (message printed) or re-raises.
    for i in range(3):
      credentials = self.auth_function()
      try:
        auth_token = self._GetAuthToken(credentials[0], credentials[1])
      except ClientLoginError, e:
        if e.reason == "BadAuthentication":
          print >>sys.stderr, "Invalid username or password."
          continue
        if e.reason == "CaptchaRequired":
          print >>sys.stderr, (
              "Please go to\n"
              "https://www.google.com/accounts/DisplayUnlockCaptcha\n"
              "and verify you are a human. Then try again.\n"
              "If you are using a Google Apps account the URL is:\n"
              "https://www.google.com/a/yourdomain.com/UnlockCaptcha")
          break
        if e.reason == "NotVerified":
          print >>sys.stderr, "Account not verified."
          break
        if e.reason == "TermsNotAgreed":
          print >>sys.stderr, "User has not agreed to TOS."
          break
        if e.reason == "AccountDeleted":
          print >>sys.stderr, "The user account has been deleted."
          break
        if e.reason == "AccountDisabled":
          print >>sys.stderr, "The user account has been disabled."
          break
        if e.reason == "ServiceDisabled":
          print >>sys.stderr, ("The user's access to the service has been "
                               "disabled.")
          break
        if e.reason == "ServiceUnavailable":
          print >>sys.stderr, "The service is not available; try again later."
          break
        raise
      self._GetAuthCookie(auth_token)
      return

  def Send(self, request_path, payload=None,
           content_type="application/octet-stream",
           timeout=None,
           extra_headers=None,
           **kwargs):
    """Sends an RPC and returns the response.

    Args:
      request_path: The path to send the request to, eg /api/appversion/create.
      payload: The body of the request, or None to send an empty request.
      content_type: The Content-Type header to use.
      timeout: timeout in seconds; default None i.e. no timeout.
        (Note: for large requests on OS X, the timeout doesn't work right.)
      extra_headers: Dict containing additional HTTP headers that should be
        included in the request (string header names mapped to their values),
        or None to not include any additional headers.
      kwargs: Any keyword arguments are converted into query string parameters.

    Returns:
      The response body, as a string.
    """
    # TODO: Don't require authentication. Let the server say
    # whether it is necessary.
    if not self.authenticated:
      self._Authenticate()
    # The socket default timeout is process-global; restore it in 'finally'.
    old_timeout = socket.getdefaulttimeout()
    socket.setdefaulttimeout(timeout)
    try:
      tries = 0
      while True:
        tries += 1
        args = dict(kwargs)
        url = "%s%s" % (self.host, request_path)
        if args:
          url += "?" + urllib.urlencode(args)
        req = self._CreateRequest(url=url, data=payload)
        req.add_header("Content-Type", content_type)
        if extra_headers:
          for header, value in extra_headers.items():
            req.add_header(header, value)
        try:
          f = self.opener.open(req)
          response = f.read()
          f.close()
          return response
        except urllib2.HTTPError, e:
          if tries > 3:
            raise
          elif e.code == 401 or e.code == 302:
            # Auth cookie missing/expired: re-authenticate and retry.
            self._Authenticate()
##          elif e.code >= 500 and e.code < 600:
##            # Server Error - try again.
##            continue
          elif e.code == 301:
            # Handle permanent redirect manually.
            url = e.info()["location"]
            url_loc = urlparse.urlparse(url)
            self.host = '%s://%s' % (url_loc[0], url_loc[1])
          else:
            raise
    finally:
      socket.setdefaulttimeout(old_timeout)
class HttpRpcServer(AbstractRpcServer):
  """Provides a simplified RPC-style interface for HTTP requests."""

  def _Authenticate(self):
    """Save the cookie jar after authentication."""
    super(HttpRpcServer, self)._Authenticate()
    if self.save_cookies:
      StatusUpdate("Saving authentication cookies to %s" % self.cookie_file)
      self.cookie_jar.save()

  def _GetOpener(self):
    """Returns an OpenerDirector that supports cookies and ignores redirects.

    Returns:
      A urllib2.OpenerDirector object.
    """
    # Deliberately no HTTPRedirectHandler: redirects must surface to the
    # caller (Send/_GetAuthCookie inspect 301/302 responses themselves).
    opener = urllib2.OpenerDirector()
    opener.add_handler(urllib2.ProxyHandler())
    opener.add_handler(urllib2.UnknownHandler())
    opener.add_handler(urllib2.HTTPHandler())
    opener.add_handler(urllib2.HTTPDefaultErrorHandler())
    opener.add_handler(urllib2.HTTPSHandler())
    opener.add_handler(urllib2.HTTPErrorProcessor())
    if self.save_cookies:
      self.cookie_file = os.path.expanduser("~/.codereview_upload_cookies")
      self.cookie_jar = cookielib.MozillaCookieJar(self.cookie_file)
      if os.path.exists(self.cookie_file):
        try:
          self.cookie_jar.load()
          # A loadable cookie file means a previous session's cookies may
          # still be valid; skip re-authentication until the server objects.
          self.authenticated = True
          StatusUpdate("Loaded authentication cookies from %s" %
                       self.cookie_file)
        except (cookielib.LoadError, IOError):
          # Failed to load cookies - just ignore them.
          pass
      else:
        # Create an empty cookie file with mode 600
        fd = os.open(self.cookie_file, os.O_CREAT, 0600)
        os.close(fd)
      # Always chmod the cookie file
      os.chmod(self.cookie_file, 0600)
    else:
      # Don't save cookies across runs of update.py.
      self.cookie_jar = cookielib.CookieJar()
    opener.add_handler(urllib2.HTTPCookieProcessor(self.cookie_jar))
    return opener
# Command-line interface definition.  The parser is built at module import
# time; option groups mirror the phases of an upload (logging, server,
# issue metadata, patch generation, Perforce specifics).
parser = optparse.OptionParser(
    usage="%prog [options] [-- diff_options] [path...]")
parser.add_option("-y", "--assume_yes", action="store_true",
                  dest="assume_yes", default=False,
                  help="Assume that the answer to yes/no questions is 'yes'.")
# Logging
group = parser.add_option_group("Logging options")
group.add_option("-q", "--quiet", action="store_const", const=0,
                 dest="verbose", help="Print errors only.")
group.add_option("-v", "--verbose", action="store_const", const=2,
                 dest="verbose", default=1,
                 help="Print info level logs.")
group.add_option("--noisy", action="store_const", const=3,
                 dest="verbose", help="Print all logs.")
group.add_option("--print_diffs", dest="print_diffs", action="store_true",
                 help="Print full diffs.")
# Review server
group = parser.add_option_group("Review server options")
group.add_option("-s", "--server", action="store", dest="server",
                 default=DEFAULT_REVIEW_SERVER,
                 metavar="SERVER",
                 help=("The server to upload to. The format is host[:port]. "
                       "Defaults to '%default'."))
group.add_option("-e", "--email", action="store", dest="email",
                 metavar="EMAIL", default=None,
                 help="The username to use. Will prompt if omitted.")
group.add_option("-H", "--host", action="store", dest="host",
                 metavar="HOST", default=None,
                 help="Overrides the Host header sent with all RPCs.")
group.add_option("--no_cookies", action="store_false",
                 dest="save_cookies", default=True,
                 help="Do not save authentication cookies to local disk.")
group.add_option("--account_type", action="store", dest="account_type",
                 metavar="TYPE", default=AUTH_ACCOUNT_TYPE,
                 choices=["GOOGLE", "HOSTED"],
                 help=("Override the default account type "
                       "(defaults to '%default', "
                       "valid choices are 'GOOGLE' and 'HOSTED')."))
# Issue
group = parser.add_option_group("Issue options")
group.add_option("-d", "--description", action="store", dest="description",
                 metavar="DESCRIPTION", default=None,
                 help="Optional description when creating an issue.")
group.add_option("-f", "--description_file", action="store",
                 dest="description_file", metavar="DESCRIPTION_FILE",
                 default=None,
                 help="Optional path of a file that contains "
                      "the description when creating an issue.")
group.add_option("-r", "--reviewers", action="store", dest="reviewers",
                 metavar="REVIEWERS", default="jcgregorio@google.com",
                 help="Add reviewers (comma separated email addresses).")
group.add_option("--cc", action="store", dest="cc",
                 metavar="CC",
                 default="google-api-python-client@googlegroups.com",
                 help="Add CC (comma separated email addresses).")
group.add_option("--private", action="store_true", dest="private",
                 default=False,
                 help="Make the issue restricted to reviewers and those CCed")
# Upload options
group = parser.add_option_group("Patch options")
group.add_option("-m", "--message", action="store", dest="message",
                 metavar="MESSAGE", default=None,
                 help="A message to identify the patch. "
                      "Will prompt if omitted.")
group.add_option("-i", "--issue", type="int", action="store",
                 metavar="ISSUE", default=None,
                 help="Issue number to which to add. Defaults to new issue.")
group.add_option("--base_url", action="store", dest="base_url", default=None,
                 help="Base repository URL (listed as \"Base URL\" when "
                      "viewing issue). If omitted, will be guessed automatically "
                      "for SVN repos and left blank for others.")
group.add_option("--download_base", action="store_true",
                 dest="download_base", default=False,
                 help="Base files will be downloaded by the server "
                      "(side-by-side diffs may not work on files with CRs).")
group.add_option("--rev", action="store", dest="revision",
                 metavar="REV", default=None,
                 help="Base revision/branch/tree to diff against. Use "
                      "rev1:rev2 range to review already committed changeset.")
group.add_option("--send_mail", action="store_true",
                 dest="send_mail", default=False,
                 help="Send notification email to reviewers.")
group.add_option("--vcs", action="store", dest="vcs",
                 metavar="VCS", default=None,
                 help=("Version control system (optional, usually upload.py "
                       "already guesses the right VCS)."))
group.add_option("--emulate_svn_auto_props", action="store_true",
                 dest="emulate_svn_auto_props", default=False,
                 help=("Emulate Subversion's auto properties feature."))
# Perforce-specific
group = parser.add_option_group("Perforce-specific options "
                                "(overrides P4 environment variables)")
group.add_option("--p4_port", action="store", dest="p4_port",
                 metavar="P4_PORT", default=None,
                 help=("Perforce server and port (optional)"))
group.add_option("--p4_changelist", action="store", dest="p4_changelist",
                 metavar="P4_CHANGELIST", default=None,
                 help=("Perforce changelist id"))
group.add_option("--p4_client", action="store", dest="p4_client",
                 metavar="P4_CLIENT", default=None,
                 help=("Perforce client/workspace"))
group.add_option("--p4_user", action="store", dest="p4_user",
                 metavar="P4_USER", default=None,
                 help=("Perforce user"))
def GetRpcServer(server, email=None, host_override=None, save_cookies=True,
account_type=AUTH_ACCOUNT_TYPE):
"""Returns an instance of an AbstractRpcServer.
Args:
server: String containing the review server URL.
email: String containing user's email address.
host_override: If not None, string containing an alternate hostname to use
in the host header.
save_cookies: Whether authentication cookies should be saved to disk.
account_type: Account type for authentication, either 'GOOGLE'
or 'HOSTED'. Defaults to AUTH_ACCOUNT_TYPE.
Returns:
A new AbstractRpcServer, on which RPC calls can be made.
"""
rpc_server_class = HttpRpcServer
# If this is the dev_appserver, use fake authentication.
host = (host_override or server).lower()
if re.match(r'(http://)?localhost([:/]|$)', host):
if email is None:
email = "test@example.com"
logging.info("Using debug user %s. Override with --email" % email)
server = rpc_server_class(
server,
lambda: (email, "password"),
host_override=host_override,
extra_headers={"Cookie":
'dev_appserver_login="%s:False"' % email},
save_cookies=save_cookies,
account_type=account_type)
# Don't try to talk to ClientLogin.
server.authenticated = True
return server
def GetUserCredentials():
"""Prompts the user for a username and password."""
# Create a local alias to the email variable to avoid Python's crazy
# scoping rules.
local_email = email
if local_email is None:
local_email = GetEmail("Email (login for uploading to %s)" % server)
password = None
if keyring:
password = keyring.get_password(host, local_email)
if password is not None:
print "Using password from system keyring."
else:
password = getpass.getpass("Password for %s: " % local_email)
if keyring:
answer = raw_input("Store password in system keyring?(y/N) ").strip()
if answer == "y":
keyring.set_password(host, local_email, password)
return (local_email, password)
return rpc_server_class(server,
GetUserCredentials,
host_override=host_override,
save_cookies=save_cookies)
def EncodeMultipartFormData(fields, files):
  """Encode form fields for multipart/form-data.

  Args:
    fields: A sequence of (name, value) elements for regular form fields.
    files: A sequence of (name, filename, value) elements for data to be
      uploaded as files.

  Returns:
    (content_type, body) ready for httplib.HTTP instance.

  Source:
    http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/146306
  """
  BOUNDARY = '-M-A-G-I-C---B-O-U-N-D-A-R-Y-'
  CRLF = '\r\n'

  def _utf8(value):
    # Payload values may be unicode; the wire format wants UTF-8 bytes.
    if isinstance(value, unicode):
      return value.encode('utf-8')
    return value

  parts = []
  for key, value in fields:
    parts.extend(['--' + BOUNDARY,
                  'Content-Disposition: form-data; name="%s"' % key,
                  '',
                  _utf8(value)])
  for key, filename, value in files:
    parts.extend(['--' + BOUNDARY,
                  'Content-Disposition: form-data; name="%s"; filename="%s"' %
                  (key, filename),
                  'Content-Type: %s' % GetContentType(filename),
                  '',
                  _utf8(value)])
  parts.append('--' + BOUNDARY + '--')
  parts.append('')
  body = CRLF.join(parts)
  content_type = 'multipart/form-data; boundary=%s' % BOUNDARY
  return content_type, body
def GetContentType(filename):
  """Guess the MIME type from *filename*, defaulting to octet-stream."""
  guessed, _ = mimetypes.guess_type(filename)
  return guessed or 'application/octet-stream'
# Use a shell for subcommands on Windows to get a PATH search.
use_shell = sys.platform.startswith("win")
def RunShellWithReturnCode(command, print_output=False,
                           universal_newlines=True,
                           env=os.environ):
  """Executes a command and returns the output from stdout and the return code.

  Args:
    command: Command to execute.
    print_output: If True, the output is printed to stdout.
      If False, both stdout and stderr are ignored.
    universal_newlines: Use universal_newlines flag (default: True).
    env: Environment dict passed to the child (defaults to os.environ).

  Returns:
    Tuple (output, return code)
  """
  logging.info("Running %s", command)
  # 'use_shell' is the module-level flag (True only on Windows, for PATH
  # search).
  p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                       shell=use_shell, universal_newlines=universal_newlines,
                       env=env)
  if print_output:
    output_array = []
    while True:
      line = p.stdout.readline()
      if not line:
        break
      print line.strip("\n")
      output_array.append(line)
    output = "".join(output_array)
  else:
    output = p.stdout.read()
  # NOTE(review): stderr is only drained after stdout is fully consumed; a
  # child that fills the stderr pipe first could block -- confirm the
  # commands run here never produce large stderr output.
  p.wait()
  errout = p.stderr.read()
  if print_output and errout:
    print >>sys.stderr, errout
  p.stdout.close()
  p.stderr.close()
  return output, p.returncode
def RunShell(command, silent_ok=False, universal_newlines=True,
             print_output=False, env=os.environ):
  """Run *command* and return its stdout; exit the program on failure.

  Exits (via ErrorExit) when the command returns a non-zero status, or when
  it produces no output and silent_ok is False.
  """
  data, retcode = RunShellWithReturnCode(command, print_output,
                                         universal_newlines, env)
  if retcode:
    ErrorExit("Got error status from %s:\n%s" % (command, data))
  if not silent_ok and not data:
    ErrorExit("No output from %s" % command)
  return data
class VersionControlSystem(object):
"""Abstract base class providing an interface to the VCS."""
  def __init__(self, options):
    """Constructor.

    Args:
      options: Command line options.
    """
    # Parsed command-line options, kept for use by VCS-specific subclasses.
    self.options = options
  def PostProcessDiff(self, diff):
    """Return the diff with any special post processing this VCS needs, e.g.
    to include an svn-style "Index:"."""
    # Base class default is the identity; subclasses may override.
    return diff
  def GenerateDiff(self, args):
    """Return the current diff as a string.

    Args:
      args: Extra arguments to pass to the diff command.

    Raises:
      NotImplementedError: always; subclasses must override.
    """
    raise NotImplementedError(
        "abstract method -- subclass %s must override" % self.__class__)
def GetUnknownFiles(self):
"""Return a list of files unknown to the VCS."""
raise NotImplementedError(
"abstract method -- subclass %s must override" % self.__class__)
def CheckForUnknownFiles(self):
"""Show an "are you sure?" prompt if there are unknown files."""
unknown_files = self.GetUnknownFiles()
if unknown_files:
print "The following files are not added to version control:"
for line in unknown_files:
print line
prompt = "Are you sure to continue?(y/N) "
answer = raw_input(prompt).strip()
if answer != "y":
ErrorExit("User aborted")
def GetBaseFile(self, filename):
"""Get the content of the upstream version of a file.
Returns:
A tuple (base_content, new_content, is_binary, status)
base_content: The contents of the base file.
new_content: For text files, this is empty. For binary files, this is
the contents of the new file, since the diff output won't contain
information to reconstruct the current file.
is_binary: True iff the file is binary.
status: The status of the file.
"""
raise NotImplementedError(
"abstract method -- subclass %s must override" % self.__class__)
def GetBaseFiles(self, diff):
"""Helper that calls GetBase file for each file in the patch.
Returns:
A dictionary that maps from filename to GetBaseFile's tuple. Filenames
are retrieved based on lines that start with "Index:" or
"Property changes on:".
"""
files = {}
for line in diff.splitlines(True):
if line.startswith('Index:') or line.startswith('Property changes on:'):
unused, filename = line.split(':', 1)
# On Windows if a file has property changes its filename uses '\'
# instead of '/'.
filename = filename.strip().replace('\\', '/')
files[filename] = self.GetBaseFile(filename)
return files
def UploadBaseFiles(self, issue, rpc_server, patch_list, patchset, options,
files):
"""Uploads the base files (and if necessary, the current ones as well)."""
def UploadFile(filename, file_id, content, is_binary, status, is_base):
"""Uploads a file to the server."""
file_too_large = False
if is_base:
type = "base"
else:
type = "current"
if len(content) > MAX_UPLOAD_SIZE:
print ("Not uploading the %s file for %s because it's too large." %
(type, filename))
file_too_large = True
content = ""
checksum = md5(content).hexdigest()
if options.verbose > 0 and not file_too_large:
print "Uploading %s file for %s" % (type, filename)
url = "/%d/upload_content/%d/%d" % (int(issue), int(patchset), file_id)
form_fields = [("filename", filename),
("status", status),
("checksum", checksum),
("is_binary", str(is_binary)),
("is_current", str(not is_base)),
]
if file_too_large:
form_fields.append(("file_too_large", "1"))
if options.email:
form_fields.append(("user", options.email))
ctype, body = EncodeMultipartFormData(form_fields,
[("data", filename, content)])
response_body = rpc_server.Send(url, body,
content_type=ctype)
if not response_body.startswith("OK"):
StatusUpdate(" --> %s" % response_body)
sys.exit(1)
patches = dict()
[patches.setdefault(v, k) for k, v in patch_list]
for filename in patches.keys():
base_content, new_content, is_binary, status = files[filename]
file_id_str = patches.get(filename)
if file_id_str.find("nobase") != -1:
base_content = None
file_id_str = file_id_str[file_id_str.rfind("_") + 1:]
file_id = int(file_id_str)
if base_content != None:
UploadFile(filename, file_id, base_content, is_binary, status, True)
if new_content != None:
UploadFile(filename, file_id, new_content, is_binary, status, False)
def IsImage(self, filename):
"""Returns true if the filename has an image extension."""
mimetype = mimetypes.guess_type(filename)[0]
if not mimetype:
return False
return mimetype.startswith("image/")
def IsBinary(self, filename):
"""Returns true if the guessed mimetyped isnt't in text group."""
mimetype = mimetypes.guess_type(filename)[0]
if not mimetype:
return False # e.g. README, "real" binaries usually have an extension
# special case for text files which don't start with text/
if mimetype in TEXT_MIMETYPES:
return False
return not mimetype.startswith("text/")
class SubversionVCS(VersionControlSystem):
  """Implementation of the VersionControlSystem interface for Subversion."""

  def __init__(self, options):
    super(SubversionVCS, self).__init__(options)
    if self.options.revision:
      # Accept "REV" or "REV1:REV2"; group(3) is the optional end revision.
      match = re.match(r"(\d+)(:(\d+))?", self.options.revision)
      if not match:
        ErrorExit("Invalid Subversion revision %s." % self.options.revision)
      self.rev_start = match.group(1)
      self.rev_end = match.group(3)
    else:
      self.rev_start = self.rev_end = None
    # Cache output from "svn list -r REVNO dirname".
    # Keys: dirname, Values: 2-tuple (output for start rev and end rev).
    self.svnls_cache = {}
    # Base URL is required to fetch files deleted in an older revision.
    # Result is cached to not guess it over and over again in GetBaseFile().
    required = self.options.download_base or self.options.revision is not None
    self.svn_base = self._GuessBase(required)

  def GuessBase(self, required):
    """Wrapper for _GuessBase; returns the cached guess from __init__."""
    return self.svn_base

  def _GuessBase(self, required):
    """Returns base URL for current diff.

    Args:
      required: If true, exits if the url can't be guessed, otherwise None is
        returned.
    """
    info = RunShell(["svn", "info"])
    for line in info.splitlines():
      if line.startswith("URL: "):
        url = line.split()[1]
        scheme, netloc, path, params, query, fragment = urlparse.urlparse(url)
        guess = ""
        # Rewrite well-known hosts to their public read-only HTTP form.
        if netloc == "svn.python.org" and scheme == "svn+ssh":
          path = "projects" + path
          scheme = "http"
          guess = "Python "
        elif netloc.endswith(".googlecode.com"):
          scheme = "http"
          guess = "Google Code "
        path = path + "/"
        base = urlparse.urlunparse((scheme, netloc, path, params,
                                    query, fragment))
        logging.info("Guessed %sbase = %s", guess, base)
        return base
    if required:
      ErrorExit("Can't find URL in output from svn info")
    return None

  def GenerateDiff(self, args):
    """Returns "svn diff" output; exits if it contains no file entries."""
    cmd = ["svn", "diff"]
    if self.options.revision:
      cmd += ["-r", self.options.revision]
    cmd.extend(args)
    data = RunShell(cmd)
    count = 0
    for line in data.splitlines():
      if line.startswith("Index:") or line.startswith("Property changes on:"):
        count += 1
        logging.info(line)
    if not count:
      ErrorExit("No valid patches found in output from svn diff")
    return data

  def _CollapseKeywords(self, content, keyword_str):
    """Collapses SVN keywords."""
    # svn cat translates keywords but svn diff doesn't. As a result of this
    # behavior patching.PatchChunks() fails with a chunk mismatch error.
    # This part was originally written by the Review Board development team
    # who had the same problem (http://reviews.review-board.org/r/276/).
    # Mapping of keywords to known aliases
    svn_keywords = {
      # Standard keywords
      'Date': ['Date', 'LastChangedDate'],
      'Revision': ['Revision', 'LastChangedRevision', 'Rev'],
      'Author': ['Author', 'LastChangedBy'],
      'HeadURL': ['HeadURL', 'URL'],
      'Id': ['Id'],
      # Aliases
      'LastChangedDate': ['LastChangedDate', 'Date'],
      'LastChangedRevision': ['LastChangedRevision', 'Rev', 'Revision'],
      'LastChangedBy': ['LastChangedBy', 'Author'],
      'URL': ['URL', 'HeadURL'],
    }

    def repl(m):
      # Expanded keyword ("$Kw: value $") -> collapsed, padded to length.
      if m.group(2):
        return "$%s::%s$" % (m.group(1), " " * len(m.group(3)))
      return "$%s$" % m.group(1)

    # All keyword spellings (plus aliases) enabled by the svn:keywords value.
    keywords = [keyword
                for name in keyword_str.split(" ")
                for keyword in svn_keywords.get(name, [])]
    return re.sub(r"\$(%s):(:?)([^\$]+)\$" % '|'.join(keywords), repl, content)

  def GetUnknownFiles(self):
    """Returns the "?" (unversioned) lines from "svn status"."""
    status = RunShell(["svn", "status", "--ignore-externals"], silent_ok=True)
    unknown_files = []
    for line in status.split("\n"):
      if line and line[0] == "?":
        unknown_files.append(line)
    return unknown_files

  def ReadFile(self, filename):
    """Returns the contents of a file."""
    file = open(filename, 'rb')
    result = ""
    try:
      result = file.read()
    finally:
      file.close()
    return result

  def GetStatus(self, filename):
    """Returns the status of a file."""
    if not self.options.revision:
      status = RunShell(["svn", "status", "--ignore-externals", filename])
      if not status:
        ErrorExit("svn status returned no output for %s" % filename)
      status_lines = status.splitlines()
      # If file is in a cl, the output will begin with
      # "\n--- Changelist 'cl_name':\n". See
      # http://svn.collab.net/repos/svn/trunk/notes/changelist-design.txt
      if (len(status_lines) == 3 and
          not status_lines[0] and
          status_lines[1].startswith("--- Changelist")):
        status = status_lines[2]
      else:
        status = status_lines[0]
    # If we have a revision to diff against we need to run "svn list"
    # for the old and the new revision and compare the results to get
    # the correct status for a file.
    else:
      dirname, relfilename = os.path.split(filename)
      if dirname not in self.svnls_cache:
        cmd = ["svn", "list", "-r", self.rev_start, dirname or "."]
        out, returncode = RunShellWithReturnCode(cmd)
        if returncode:
          ErrorExit("Failed to get status for %s." % filename)
        old_files = out.splitlines()
        args = ["svn", "list"]
        if self.rev_end:
          args += ["-r", self.rev_end]
        cmd = args + [dirname or "."]
        out, returncode = RunShellWithReturnCode(cmd)
        if returncode:
          ErrorExit("Failed to run command %s" % cmd)
        self.svnls_cache[dirname] = (old_files, out.splitlines())
      old_files, new_files = self.svnls_cache[dirname]
      # Present only in the old listing -> deleted; in both -> modified;
      # only in the new listing -> added.
      # BUG FIX: synthesized statuses are padded to svn's column width;
      # GetBaseFile() indexes status[3] and slices status[0:5], so
      # two-character strings would raise IndexError on this path.
      if relfilename in old_files and relfilename not in new_files:
        status = "D   "
      elif relfilename in old_files and relfilename in new_files:
        status = "M   "
      else:
        status = "A   "
    return status

  def GetBaseFile(self, filename):
    """Returns the base content tuple for filename; see the base class."""
    status = self.GetStatus(filename)
    base_content = None
    new_content = None
    # If a file is copied its status will be "A  +", which signifies
    # "addition-with-history". See "svn st" for more information. We need to
    # upload the original file or else diff parsing will fail if the file was
    # edited.
    if status[0] == "A" and status[3] != "+":
      # We'll need to upload the new content if we're adding a binary file
      # since diff's output won't contain it.
      mimetype = RunShell(["svn", "propget", "svn:mime-type", filename],
                          silent_ok=True)
      base_content = ""
      is_binary = bool(mimetype) and not mimetype.startswith("text/")
      if is_binary and self.IsImage(filename):
        new_content = self.ReadFile(filename)
    elif (status[0] in ("M", "D", "R") or
          (status[0] == "A" and status[3] == "+") or  # Copied file.
          (status[0] == " " and status[1] == "M")):  # Property change.
      args = []
      if self.options.revision:
        url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start)
      else:
        # Don't change filename, it's needed later.
        url = filename
        args += ["-r", "BASE"]
      cmd = ["svn"] + args + ["propget", "svn:mime-type", url]
      mimetype, returncode = RunShellWithReturnCode(cmd)
      if returncode:
        # File does not exist in the requested revision.
        # Reset mimetype, it contains an error message.
        mimetype = ""
      else:
        mimetype = mimetype.strip()
      get_base = False
      is_binary = (bool(mimetype) and
                   not mimetype.startswith("text/") and
                   not mimetype in TEXT_MIMETYPES)
      if status[0] == " ":
        # Empty base content just to force an upload.
        base_content = ""
      elif is_binary:
        if self.IsImage(filename):
          get_base = True
          if status[0] == "M":
            if not self.rev_end:
              new_content = self.ReadFile(filename)
            else:
              url = "%s/%s@%s" % (self.svn_base, filename, self.rev_end)
              new_content = RunShell(["svn", "cat", url],
                                     universal_newlines=True, silent_ok=True)
        else:
          base_content = ""
      else:
        get_base = True
      if get_base:
        # Binary base content must not have its newlines translated.
        if is_binary:
          universal_newlines = False
        else:
          universal_newlines = True
        if self.rev_start:
          # "svn cat -r REV delete_file.txt" doesn't work. cat requires
          # the full URL with "@REV" appended instead of using "-r" option.
          url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start)
          base_content = RunShell(["svn", "cat", url],
                                  universal_newlines=universal_newlines,
                                  silent_ok=True)
        else:
          base_content, ret_code = RunShellWithReturnCode(
              ["svn", "cat", filename], universal_newlines=universal_newlines)
          if ret_code and status[0] == "R":
            # It's a replaced file without local history (see issue208).
            # The base file needs to be fetched from the server.
            url = "%s/%s" % (self.svn_base, filename)
            base_content = RunShell(["svn", "cat", url],
                                    universal_newlines=universal_newlines,
                                    silent_ok=True)
          elif ret_code:
            ErrorExit("Got error status from 'svn cat %s'" % filename)
        if not is_binary:
          # Undo keyword expansion so the diff applies cleanly; see
          # _CollapseKeywords.
          args = []
          if self.rev_start:
            url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start)
          else:
            url = filename
            args += ["-r", "BASE"]
          cmd = ["svn"] + args + ["propget", "svn:keywords", url]
          keywords, returncode = RunShellWithReturnCode(cmd)
          if keywords and not returncode:
            base_content = self._CollapseKeywords(base_content, keywords)
    else:
      StatusUpdate("svn status returned unexpected output: %s" % status)
      sys.exit(1)
    return base_content, new_content, is_binary, status[0:5]
class GitVCS(VersionControlSystem):
  """Implementation of the VersionControlSystem interface for Git."""

  def __init__(self, options):
    super(GitVCS, self).__init__(options)
    # Map of filename -> (hash before, hash after) of base file.
    # Hashes for "no such file" are represented as None.
    self.hashes = {}
    # Map of new filename -> old filename for renames.
    self.renames = {}

  def PostProcessDiff(self, gitdiff):
    """Converts the diff output to include an svn-style "Index:" line as well
    as record the hashes of the files, so we can upload them along with our
    diff."""
    # Special hash used by git to indicate "no such content".
    NULL_HASH = "0"*40

    def IsFileNew(filename):
      # A file is "new" when its before-hash was recorded as None below.
      return filename in self.hashes and self.hashes[filename][0] is None

    def AddSubversionPropertyChange(filename):
      """Add svn's property change information into the patch if given file is
      new file.

      We use Subversion's auto-props setting to retrieve its property.
      See http://svnbook.red-bean.com/en/1.1/ch07.html#svn-ch-7-sect-1.3.2 for
      Subversion's [auto-props] setting.
      """
      if self.options.emulate_svn_auto_props and IsFileNew(filename):
        svnprops = GetSubversionPropertyChanges(filename)
        if svnprops:
          svndiff.append("\n" + svnprops + "\n")

    svndiff = []
    filecount = 0
    filename = None
    for line in gitdiff.splitlines():
      match = re.match(r"diff --git a/(.*) b/(.*)$", line)
      if match:
        # Add auto property here for previously seen file.
        if filename is not None:
          AddSubversionPropertyChange(filename)
        filecount += 1
        # Intentionally use the "after" filename so we can show renames.
        filename = match.group(2)
        svndiff.append("Index: %s\n" % filename)
        if match.group(1) != match.group(2):
          self.renames[match.group(2)] = match.group(1)
      else:
        # The "index" line in a git diff looks like this (long hashes elided):
        # index 82c0d44..b2cee3f 100755
        # We want to save the left hash, as that identifies the base file.
        match = re.match(r"index (\w+)\.\.(\w+)", line)
        if match:
          before, after = (match.group(1), match.group(2))
          if before == NULL_HASH:
            before = None
          if after == NULL_HASH:
            after = None
          self.hashes[filename] = (before, after)
      # Every original diff line is kept, after any injected "Index:" line.
      svndiff.append(line + "\n")
    if not filecount:
      ErrorExit("No valid patches found in output from git diff")
    # Add auto property for the last seen file.
    assert filename is not None
    AddSubversionPropertyChange(filename)
    return "".join(svndiff)

  def GenerateDiff(self, extra_args):
    """Returns "git diff" output against the configured revision(s)."""
    extra_args = extra_args[:]
    if self.options.revision:
      # "REV1:REV2" diffs a range; a single value diffs against that commit.
      if ":" in self.options.revision:
        extra_args = self.options.revision.split(":", 1) + extra_args
      else:
        extra_args = [self.options.revision] + extra_args
    # --no-ext-diff is broken in some versions of Git, so try to work around
    # this by overriding the environment (but there is still a problem if the
    # git config key "diff.external" is used).
    env = os.environ.copy()
    if 'GIT_EXTERNAL_DIFF' in env: del env['GIT_EXTERNAL_DIFF']
    return RunShell(["git", "diff", "--no-ext-diff", "--full-index", "-M"]
                    + extra_args, env=env)

  def GetUnknownFiles(self):
    """Return a list of files unknown to the VCS."""
    status = RunShell(["git", "ls-files", "--exclude-standard", "--others"],
                      silent_ok=True)
    return status.splitlines()

  def GetFileContent(self, file_hash, is_binary):
    """Returns the content of a file identified by its git hash."""
    data, retcode = RunShellWithReturnCode(["git", "show", file_hash],
                                           universal_newlines=not is_binary)
    if retcode:
      ErrorExit("Got error status from 'git show %s'" % file_hash)
    return data

  def GetBaseFile(self, filename):
    """Returns the base content tuple for filename; see the base class."""
    hash_before, hash_after = self.hashes.get(filename, (None,None))
    base_content = None
    new_content = None
    is_binary = self.IsBinary(filename)
    status = None
    if filename in self.renames:
      status = "A +"  # Match svn attribute name for renames.
      if filename not in self.hashes:
        # If a rename doesn't change the content, we never get a hash.
        base_content = RunShell(["git", "show", "HEAD:" + filename])
    elif not hash_before:
      status = "A"
      base_content = ""
    elif not hash_after:
      status = "D"
    else:
      status = "M"
    is_image = self.IsImage(filename)
    # Grab the before/after content if we need it.
    # We should include file contents if it's text or it's an image.
    if not is_binary or is_image:
      # Grab the base content if we don't have it already.
      if base_content is None and hash_before:
        base_content = self.GetFileContent(hash_before, is_binary)
      # Only include the "after" file if it's an image; otherwise it
      # it is reconstructed from the diff.
      if is_image and hash_after:
        new_content = self.GetFileContent(hash_after, is_binary)
    return (base_content, new_content, is_binary, status)
class CVSVCS(VersionControlSystem):
  """Implementation of the VersionControlSystem interface for CVS."""

  def __init__(self, options):
    super(CVSVCS, self).__init__(options)

  def GetOriginalContent_(self, filename):
    """Fetches the pristine repository copy of filename via "cvs up".

    The caller is expected to have moved any locally modified file out of
    the way first (see GetBaseFile), so "cvs up" recreates the base copy.
    """
    RunShell(["cvs", "up", filename], silent_ok=True)
    # TODO need detect file content encoding
    content = open(filename).read()
    # Normalize CRLF line endings to LF.
    return content.replace("\r\n", "\n")

  def GetBaseFile(self, filename):
    """Returns the base content tuple for filename; see the base class.

    Classifies the file from "cvs status" output and retrieves the base
    content where one exists.
    """
    base_content = None
    new_content = None
    is_binary = False
    status = "A"
    output, retcode = RunShellWithReturnCode(["cvs", "status", filename])
    if retcode:
      ErrorExit("Got error status from 'cvs status %s'" % filename)
    # BUG FIX: str.find() returns -1 (which is truthy) when the substring is
    # absent, so the original bare truth tests on the "Locally Added" and
    # "Needs Checkout" branches matched for almost any status output.
    # Compare against -1 explicitly, as the "Locally Modified" branch does.
    if output.find("Status: Locally Modified") != -1:
      status = "M"
      # Move the modified file aside so "cvs up" can restore the base
      # version, then put the modified copy back.
      temp_filename = "%s.tmp123" % filename
      os.rename(filename, temp_filename)
      base_content = self.GetOriginalContent_(filename)
      os.rename(temp_filename, filename)
    elif output.find("Status: Locally Added") != -1:
      status = "A"
      base_content = ""
    elif output.find("Status: Needs Checkout") != -1:
      status = "D"
      base_content = self.GetOriginalContent_(filename)
    # NOTE(review): any other status falls through with status "A" and
    # base_content None, matching the initialized defaults above.
    return (base_content, new_content, is_binary, status)

  def GenerateDiff(self, extra_args):
    """Returns "cvs diff -u -N" output; exits if no file entries are found."""
    cmd = ["cvs", "diff", "-u", "-N"]
    if self.options.revision:
      cmd += ["-r", self.options.revision]
    cmd.extend(extra_args)
    data, retcode = RunShellWithReturnCode(cmd)
    count = 0
    if retcode == 0:
      for line in data.splitlines():
        if line.startswith("Index:"):
          count += 1
          logging.info(line)
    if not count:
      ErrorExit("No valid patches found in output from cvs diff")
    return data

  def GetUnknownFiles(self):
    """Returns the "?" (unknown to CVS) lines from "cvs diff" output."""
    status = RunShell(["cvs", "diff"],
                      silent_ok=True)
    unknown_files = []
    for line in status.split("\n"):
      if line and line[0] == "?":
        unknown_files.append(line)
    return unknown_files
class MercurialVCS(VersionControlSystem):
  """Implementation of the VersionControlSystem interface for Mercurial."""

  def __init__(self, options, repo_dir):
    super(MercurialVCS, self).__init__(options)
    # Absolute path to repository (we can be in a subdir)
    self.repo_dir = os.path.normpath(repo_dir)
    # Compute the subdir
    cwd = os.path.normpath(os.getcwd())
    assert cwd.startswith(self.repo_dir)
    self.subdir = cwd[len(self.repo_dir):].lstrip(r"\/")
    if self.options.revision:
      self.base_rev = self.options.revision
    else:
      # Default to the working directory's parent revision.
      self.base_rev = RunShell(["hg", "parent", "-q"]).split(':')[1].strip()

  def _GetRelPath(self, filename):
    """Get relative path of a file according to the current directory,
    given its logical path in the repo."""
    assert filename.startswith(self.subdir), (filename, self.subdir)
    return filename[len(self.subdir):].lstrip(r"\/")

  def GenerateDiff(self, extra_args):
    """Returns "hg diff --git" output rewritten with svn-style headers."""
    cmd = ["hg", "diff", "--git", "-r", self.base_rev] + extra_args
    data = RunShell(cmd, silent_ok=True)
    svndiff = []
    filecount = 0
    for line in data.splitlines():
      m = re.match("diff --git a/(\S+) b/(\S+)", line)
      if m:
        # Modify line to make it look like as it comes from svn diff.
        # With this modification no changes on the server side are required
        # to make upload.py work with Mercurial repos.
        # NOTE: for proper handling of moved/copied files, we have to use
        # the second filename.
        filename = m.group(2)
        svndiff.append("Index: %s" % filename)
        svndiff.append("=" * 67)
        filecount += 1
        logging.info(line)
      else:
        svndiff.append(line)
    if not filecount:
      ErrorExit("No valid patches found in output from hg diff")
    return "\n".join(svndiff) + "\n"

  def GetUnknownFiles(self):
    """Return a list of files unknown to the VCS."""
    args = []
    status = RunShell(["hg", "status", "--rev", self.base_rev, "-u", "."],
                      silent_ok=True)
    unknown_files = []
    for line in status.splitlines():
      st, fn = line.split(" ", 1)
      if st == "?":
        unknown_files.append(fn)
    return unknown_files

  def GetBaseFile(self, filename):
    """Returns the base content tuple for filename; see the base class."""
    # "hg status" and "hg cat" both take a path relative to the current subdir
    # rather than to the repo root, but "hg diff" has given us the full path
    # to the repo root.
    base_content = ""
    new_content = None
    is_binary = False
    oldrelpath = relpath = self._GetRelPath(filename)
    # "hg status -C" returns two lines for moved/copied files, one otherwise
    out = RunShell(["hg", "status", "-C", "--rev", self.base_rev, relpath])
    out = out.splitlines()
    # HACK: strip error message about missing file/directory if it isn't in
    # the working copy
    if out[0].startswith('%s: ' % relpath):
      out = out[1:]
    status, _ = out[0].split(' ', 1)
    if len(out) > 1 and status == "A":
      # Moved/copied => considered as modified, use old filename to
      # retrieve base contents
      oldrelpath = out[1].strip()
      status = "M"
    # A "REV1:REV2" base_rev means the base content comes from REV1.
    if ":" in self.base_rev:
      base_rev = self.base_rev.split(":", 1)[0]
    else:
      base_rev = self.base_rev
    if status != "A":
      base_content = RunShell(["hg", "cat", "-r", base_rev, oldrelpath],
                              silent_ok=True)
      is_binary = "\0" in base_content  # Mercurial's heuristic
    if status != "R":
      new_content = open(relpath, "rb").read()
      is_binary = is_binary or "\0" in new_content
    if is_binary and base_content:
      # Fetch again without converting newlines
      base_content = RunShell(["hg", "cat", "-r", base_rev, oldrelpath],
                              silent_ok=True, universal_newlines=False)
    # Only images are uploaded as full new content; text files are
    # reconstructed from the diff on the server side.
    if not is_binary or not self.IsImage(relpath):
      new_content = None
    return base_content, new_content, is_binary, status
class PerforceVCS(VersionControlSystem):
  """Implementation of the VersionControlSystem interface for Perforce."""

  def __init__(self, options):

    def ConfirmLogin():
      # Make sure we have a valid perforce session
      while True:
        data, retcode = self.RunPerforceCommandWithReturnCode(
            ["login", "-s"], marshal_output=True)
        if not data:
          ErrorExit("Error checking perforce login")
        if not retcode and (not "code" in data or data["code"] != "error"):
          break
        print "Enter perforce password: "
        self.RunPerforceCommandWithReturnCode(["login"])

    super(PerforceVCS, self).__init__(options)

    self.p4_changelist = options.p4_changelist
    if not self.p4_changelist:
      ErrorExit("A changelist id is required")
    if (options.revision):
      ErrorExit("--rev is not supported for perforce")

    self.p4_port = options.p4_port
    self.p4_client = options.p4_client
    self.p4_user = options.p4_user

    ConfirmLogin()

    if not options.message:
      # Use the first line of the changelist description as the message.
      description = self.RunPerforceCommand(["describe", self.p4_changelist],
                                            marshal_output=True)
      if description and "desc" in description:
        # Rietveld doesn't support multi-line descriptions
        raw_message = description["desc"].strip()
        lines = raw_message.splitlines()
        if len(lines):
          options.message = lines[0]

  def RunPerforceCommandWithReturnCode(self, extra_args, marshal_output=False,
                                       universal_newlines=True):
    """Builds a full p4 command line and runs it; returns (data, retcode).

    When marshal_output is set, p4 is run with -G and its output is decoded
    with marshal.loads into a Python object.
    """
    args = ["p4"]
    if marshal_output:
      # -G makes perforce format its output as marshalled python objects
      args.extend(["-G"])
    if self.p4_port:
      args.extend(["-p", self.p4_port])
    if self.p4_client:
      args.extend(["-c", self.p4_client])
    if self.p4_user:
      args.extend(["-u", self.p4_user])
    args.extend(extra_args)

    data, retcode = RunShellWithReturnCode(
        args, print_output=False, universal_newlines=universal_newlines)
    if marshal_output and data:
      data = marshal.loads(data)
    return data, retcode

  def RunPerforceCommand(self, extra_args, marshal_output=False,
                         universal_newlines=True):
    """Like RunPerforceCommandWithReturnCode, but exits on nonzero status."""
    # This might be a good place to cache call results, since things like
    # describe or fstat might get called repeatedly.
    data, retcode = self.RunPerforceCommandWithReturnCode(
        extra_args, marshal_output, universal_newlines)
    if retcode:
      ErrorExit("Got error status from %s:\n%s" % (extra_args, data))
    return data

  def GetFileProperties(self, property_key_prefix = "", command = "describe"):
    """Maps depot filename -> property value for files in the changelist.

    NOTE(review): despite the "command" parameter, "describe" is always run
    here; callers pass command="files" but it is unused -- TODO confirm.
    """
    description = self.RunPerforceCommand(["describe", self.p4_changelist],
                                          marshal_output=True)
    changed_files = {}
    file_index = 0
    # Try depotFile0, depotFile1, ... until we don't find a match
    while True:
      file_key = "depotFile%d" % file_index
      if file_key in description:
        filename = description[file_key]
        change_type = description[property_key_prefix + str(file_index)]
        changed_files[filename] = change_type
        file_index += 1
      else:
        break
    return changed_files

  def GetChangedFiles(self):
    """Returns depot filename -> p4 action for files in the changelist."""
    return self.GetFileProperties("action")

  def GetUnknownFiles(self):
    # Perforce doesn't detect new files, they have to be explicitly added
    return []

  def IsBaseBinary(self, filename):
    """Returns True if the base (pre-change) revision of filename is binary."""
    base_filename = self.GetBaseFilename(filename)
    return self.IsBinaryHelper(base_filename, "files")

  def IsPendingBinary(self, filename):
    """Returns True if the pending (post-change) file is binary."""
    return self.IsBinaryHelper(filename, "describe")

  def IsBinary(self, filename):
    """Overridden to forbid the ambiguous base-class check; use the
    IsBaseBinary/IsPendingBinary helpers instead."""
    ErrorExit("IsBinary is not safe: call IsBaseBinary or IsPendingBinary")

  def IsBinaryHelper(self, filename, command):
    """Returns True unless the p4 file type of filename ends in "text"."""
    file_types = self.GetFileProperties("type", command)
    if not filename in file_types:
      ErrorExit("Trying to check binary status of unknown file %s." % filename)
    # This treats symlinks, macintosh resource files, temporary objects, and
    # unicode as binary. See the Perforce docs for more details:
    # http://www.perforce.com/perforce/doc.current/manuals/cmdref/o.ftypes.html
    return not file_types[filename].endswith("text")

  def GetFileContent(self, filename, revision, is_binary):
    """Returns the content of filename at revision via "p4 print"."""
    file_arg = filename
    if revision:
      file_arg += "#" + revision
    # -q suppresses the initial line that displays the filename and revision
    return self.RunPerforceCommand(["print", "-q", file_arg],
                                   universal_newlines=not is_binary)

  def GetBaseFilename(self, filename):
    """Returns the depot path the base content should be read from.

    For moved/branched files the base content lives under the old path.
    """
    actionsWithDifferentBases = [
        "move/add",  # p4 move
        "branch",  # p4 integrate (to a new file), similar to hg "add"
        "add",  # p4 integrate (to a new file), after modifying the new file
    ]

    # We only see a different base for "add" if this is a downgraded branch
    # after a file was branched (integrated), then edited.
    if self.GetAction(filename) in actionsWithDifferentBases:
      # -Or shows information about pending integrations/moves
      fstat_result = self.RunPerforceCommand(["fstat", "-Or", filename],
                                             marshal_output=True)

      baseFileKey = "resolveFromFile0"  # I think it's safe to use only file0
      if baseFileKey in fstat_result:
        return fstat_result[baseFileKey]

    return filename

  def GetBaseRevision(self, filename):
    """Returns the locally-synced ("have") revision of filename, or None."""
    base_filename = self.GetBaseFilename(filename)

    have_result = self.RunPerforceCommand(["have", base_filename],
                                          marshal_output=True)
    if "haveRev" in have_result:
      return have_result["haveRev"]

  def GetLocalFilename(self, filename):
    """Maps a depot path to the local workspace path via "p4 where"."""
    where = self.RunPerforceCommand(["where", filename], marshal_output=True)
    if "path" in where:
      return where["path"]

  def GenerateDiff(self, args):
    """Builds an svn-style unified diff for the whole changelist."""

    class DiffData:
      """Accumulates per-file state while one file's diff is assembled."""
      def __init__(self, perforceVCS, filename, action):
        self.perforceVCS = perforceVCS
        self.filename = filename
        self.action = action
        self.base_filename = perforceVCS.GetBaseFilename(filename)

        self.file_body = None
        self.base_rev = None
        self.prefix = None
        self.working_copy = True
        self.change_summary = None

    def GenerateDiffHeader(diffData):
      # Emits the svn-style "Index:"/"---"/"+++" preamble for one file.
      header = []
      header.append("Index: %s" % diffData.filename)
      header.append("=" * 67)

      if diffData.base_filename != diffData.filename:
        if diffData.action.startswith("move"):
          verb = "rename"
        else:
          verb = "copy"
        header.append("%s from %s" % (verb, diffData.base_filename))
        header.append("%s to %s" % (verb, diffData.filename))

      suffix = "\t(revision %s)" % diffData.base_rev
      header.append("--- " + diffData.base_filename + suffix)
      if diffData.working_copy:
        suffix = "\t(working copy)"
      header.append("+++ " + diffData.filename + suffix)
      if diffData.change_summary:
        header.append(diffData.change_summary)
      return header

    def GenerateMergeDiff(diffData, args):
      # -du generates a unified diff, which is nearly svn format
      diffData.file_body = self.RunPerforceCommand(
          ["diff", "-du", diffData.filename] + args)
      diffData.base_rev = self.GetBaseRevision(diffData.filename)
      diffData.prefix = ""

      # We have to replace p4's file status output (the lines starting
      # with +++ or ---) to match svn's diff format
      lines = diffData.file_body.splitlines()
      first_good_line = 0
      while (first_good_line < len(lines) and
             not lines[first_good_line].startswith("@@")):
        first_good_line += 1
      diffData.file_body = "\n".join(lines[first_good_line:])
      return diffData

    def GenerateAddDiff(diffData):
      # Synthesizes a diff that adds every line of the local file.
      fstat = self.RunPerforceCommand(["fstat", diffData.filename],
                                      marshal_output=True)
      if "headRev" in fstat:
        diffData.base_rev = fstat["headRev"]  # Re-adding a deleted file
      else:
        diffData.base_rev = "0"  # Brand new file
      diffData.working_copy = False
      rel_path = self.GetLocalFilename(diffData.filename)
      diffData.file_body = open(rel_path, 'r').read()
      # Replicate svn's list of changed lines
      line_count = len(diffData.file_body.splitlines())
      diffData.change_summary = "@@ -0,0 +1"
      if line_count > 1:
        diffData.change_summary += ",%d" % line_count
      diffData.change_summary += " @@"
      diffData.prefix = "+"
      return diffData

    def GenerateDeleteDiff(diffData):
      # Synthesizes a diff that removes every line of the base file.
      diffData.base_rev = self.GetBaseRevision(diffData.filename)
      is_base_binary = self.IsBaseBinary(diffData.filename)

      # For deletes, base_filename == filename
      diffData.file_body = self.GetFileContent(diffData.base_filename,
                                               None,
                                               is_base_binary)
      # Replicate svn's list of changed lines
      line_count = len(diffData.file_body.splitlines())
      diffData.change_summary = "@@ -1"
      if line_count > 1:
        diffData.change_summary += ",%d" % line_count
      diffData.change_summary += " +0,0 @@"
      diffData.prefix = "-"
      return diffData

    changed_files = self.GetChangedFiles()

    svndiff = []
    filecount = 0
    for (filename, action) in changed_files.items():
      svn_status = self.PerforceActionToSvnStatus(action)
      if svn_status == "SKIP":
        continue

      diffData = DiffData(self, filename, action)
      # Is it possible to diff a branched file? Stackoverflow says no:
      # http://stackoverflow.com/questions/1771314/in-perforce-command-line-how-to-diff-a-file-reopened-for-add
      if svn_status == "M":
        diffData = GenerateMergeDiff(diffData, args)
      elif svn_status == "A":
        diffData = GenerateAddDiff(diffData)
      elif svn_status == "D":
        diffData = GenerateDeleteDiff(diffData)
      else:
        ErrorExit("Unknown file action %s (svn action %s)." % \
                  (action, svn_status))

      svndiff += GenerateDiffHeader(diffData)

      for line in diffData.file_body.splitlines():
        svndiff.append(diffData.prefix + line)
      filecount += 1
    if not filecount:
      ErrorExit("No valid patches found in output from p4 diff")
    return "\n".join(svndiff) + "\n"

  def PerforceActionToSvnStatus(self, status):
    """Maps a p4 action name to the single-letter svn status used here."""
    # Mirroring the list at http://permalink.gmane.org/gmane.comp.version-control.mercurial.devel/28717
    # Is there something more official?
    return {
        "add" : "A",
        "branch" : "A",
        "delete" : "D",
        "edit" : "M",  # Also includes changing file types.
        "integrate" : "M",
        "move/add" : "M",
        "move/delete": "SKIP",
        "purge" : "D",  # How does a file's status become "purge"?
    }[status]

  def GetAction(self, filename):
    """Returns the p4 action for filename; exits if it isn't in the CL."""
    changed_files = self.GetChangedFiles()
    if not filename in changed_files:
      ErrorExit("Trying to get base version of unknown file %s." % filename)

    return changed_files[filename]

  def GetBaseFile(self, filename):
    """Returns the base content tuple for filename; see the base class."""
    base_filename = self.GetBaseFilename(filename)
    base_content = ""
    new_content = None

    status = self.PerforceActionToSvnStatus(self.GetAction(filename))

    if status != "A":
      revision = self.GetBaseRevision(base_filename)
      if not revision:
        ErrorExit("Couldn't find base revision for file %s" % filename)
      is_base_binary = self.IsBaseBinary(base_filename)
      base_content = self.GetFileContent(base_filename,
                                         revision,
                                         is_base_binary)

    is_binary = self.IsPendingBinary(filename)
    if status != "D" and status != "SKIP":
      relpath = self.GetLocalFilename(filename)
      # Only full binary images are uploaded; text is rebuilt from the diff.
      if is_binary and self.IsImage(relpath):
        new_content = open(relpath, "rb").read()

    return base_content, new_content, is_binary, status
# NOTE: The SplitPatch function is duplicated in engine.py, keep them in sync.
def SplitPatch(data):
  """Splits a patch into separate pieces for each file.

  Args:
    data: A string containing the output of svn diff.

  Returns:
    A list of 2-tuple (filename, text) where text is the svn diff output
      pertaining to filename.
  """
  pieces = []
  current_name = None
  current_lines = []
  for raw_line in data.splitlines(True):
    started_name = None
    if raw_line.startswith('Index:'):
      started_name = raw_line.split(':', 1)[1].strip()
    elif raw_line.startswith('Property changes on:'):
      # Property-change headers use '\' between directories on Windows while
      # modification headers use '/'; normalize so the same file doesn't show
      # up as two entries.
      candidate = raw_line.split(':', 1)[1].strip().replace('\\', '/')
      if candidate != current_name:
        # Property changes without modifications: open a fresh diff.
        started_name = candidate
    if started_name:
      # Flush the previous file's accumulated diff, then start a new one.
      if current_name and current_lines:
        pieces.append((current_name, ''.join(current_lines)))
      current_name = started_name
      current_lines = [raw_line]
    else:
      current_lines.append(raw_line)
  if current_name and current_lines:
    pieces.append((current_name, ''.join(current_lines)))
  return pieces
def UploadSeparatePatches(issue, rpc_server, patchset, data, options):
  """Uploads a separate patch for each file in the diff output.

  Args:
    issue: Issue id (numeric or numeric string).
    rpc_server: Server proxy exposing Send(url, body, content_type=...).
    patchset: Patchset id the patches belong to.
    data: Full diff text, split per-file with SplitPatch().
    options: Parsed command-line options (download_base is consulted).

  Returns a list of [patch_key, filename] for each file.
  """
  patches = SplitPatch(data)
  rv = []
  for patch in patches:
    # The server enforces a per-request size limit; skip oversized files.
    if len(patch[1]) > MAX_UPLOAD_SIZE:
      print ("Not uploading the patch for " + patch[0] +
             " because the file is too large.")
      continue
    form_fields = [("filename", patch[0])]
    if not options.download_base:
      form_fields.append(("content_upload", "1"))
    files = [("data", "data.diff", patch[1])]
    ctype, body = EncodeMultipartFormData(form_fields, files)
    url = "/%d/upload_patch/%d" % (int(issue), int(patchset))
    print "Uploading patch for " + patch[0]
    response_body = rpc_server.Send(url, body, content_type=ctype)
    # The server replies "OK" on the first line and the patch key on the
    # second; anything else is a fatal error.
    lines = response_body.splitlines()
    if not lines or lines[0] != "OK":
      StatusUpdate(" --> %s" % response_body)
      sys.exit(1)
    rv.append([lines[1], patch[0]])
  return rv
def GuessVCSName(options):
  """Helper to guess the version control system.

  This examines the current directory, guesses which VersionControlSystem
  we're using, and returns an string indicating which VCS is detected.

  Returns:
    A pair (vcs, output).  vcs is a string indicating which VCS was detected
    and is one of VCS_GIT, VCS_MERCURIAL, VCS_SUBVERSION, VCS_PERFORCE,
    VCS_CVS, or VCS_UNKNOWN.
    Since local perforce repositories can't be easily detected, this method
    will only guess VCS_PERFORCE if any perforce options have been specified.
    output is a string containing any interesting output from the vcs
    detection routine, or None if there is nothing interesting.
  """
  # Any explicitly supplied p4* option implies Perforce, since a Perforce
  # client can't be sniffed from the filesystem.
  for attribute, value in options.__dict__.iteritems():
    if attribute.startswith("p4") and value != None:
      return (VCS_PERFORCE, None)

  # Mercurial has a command to get the base directory of a repository
  # Try running it, but don't die if we don't have hg installed.
  # NOTE: we try Mercurial first as it can sit on top of an SVN working copy.
  # NOTE(review): "except OSError, (errno, message)" is Python 2-only tuple
  # unpacking and shadows the errno module name within this function.
  try:
    out, returncode = RunShellWithReturnCode(["hg", "root"])
    if returncode == 0:
      return (VCS_MERCURIAL, out.strip())
  except OSError, (errno, message):
    if errno != 2:  # ENOENT -- they don't have hg installed.
      raise

  # Subversion has a .svn in all working directories.
  if os.path.isdir('.svn'):
    logging.info("Guessed VCS = Subversion")
    return (VCS_SUBVERSION, None)

  # Git has a command to test if you're in a git tree.
  # Try running it, but don't die if we don't have git installed.
  try:
    out, returncode = RunShellWithReturnCode(["git", "rev-parse",
                                              "--is-inside-work-tree"])
    if returncode == 0:
      return (VCS_GIT, None)
  except OSError, (errno, message):
    if errno != 2:  # ENOENT -- they don't have git installed.
      raise

  # detect CVS repos use `cvs status && $? == 0` rules
  try:
    out, returncode = RunShellWithReturnCode(["cvs", "status"])
    if returncode == 0:
      return (VCS_CVS, None)
  except OSError, (errno, message):
    if errno != 2:
      raise

  return (VCS_UNKNOWN, None)
def GuessVCS(options):
  """Helper to guess the version control system.

  This verifies any user-specified VersionControlSystem (by command line
  or environment variable).  If the user didn't specify one, this examines
  the current directory, guesses which VersionControlSystem we're using,
  and returns an instance of the appropriate class.  Exit with an error
  if we can't figure it out.

  Returns:
    A VersionControlSystem instance. Exits if the VCS can't be guessed.
  """
  vcs = options.vcs
  if not vcs:
    # Fall back to the CODEREVIEW_VCS environment variable.
    vcs = os.environ.get("CODEREVIEW_VCS")
  if vcs:
    v = VCS_ABBREVIATIONS.get(vcs.lower())
    if v is None:
      ErrorExit("Unknown version control system %r specified." % vcs)
    (vcs, extra_output) = (v, None)
  else:
    (vcs, extra_output) = GuessVCSName(options)

  if vcs == VCS_MERCURIAL:
    if extra_output is None:
      # GuessVCSName supplies the repo root only when it did the detection;
      # for a user-specified VCS we must ask hg ourselves.
      extra_output = RunShell(["hg", "root"]).strip()
    return MercurialVCS(options, extra_output)
  elif vcs == VCS_SUBVERSION:
    return SubversionVCS(options)
  elif vcs == VCS_PERFORCE:
    return PerforceVCS(options)
  elif vcs == VCS_GIT:
    return GitVCS(options)
  elif vcs == VCS_CVS:
    return CVSVCS(options)

  ErrorExit(("Could not guess version control system. "
             "Are you in a working copy directory?"))
def CheckReviewer(reviewer):
  """Validate a reviewer -- either a nickname or an email address.

  Args:
    reviewer: A nickname or an email address.

  Calls ErrorExit() if it is an invalid email address.
  """
  if "@" not in reviewer:
    return  # Assume nickname
  pieces = reviewer.split("@")
  if len(pieces) > 2:
    ErrorExit("Invalid email address: %r" % reviewer)
  assert len(pieces) == 2
  domain = pieces[1]
  if "." not in domain:
    ErrorExit("Invalid email address: %r" % reviewer)
def LoadSubversionAutoProperties():
  """Returns the content of [auto-props] section of Subversion's config file as
  a dictionary.

  Returns:
    A dictionary whose key-value pair corresponds the [auto-props] section's
    key-value pair.
    In following cases, returns empty dictionary:
      - config file doesn't exist, or
      - 'enable-auto-props' is not set to 'true-like-value' in [miscellany].
  """
  if os.name == 'nt':
    # NOTE(review): assumes the APPDATA environment variable is set;
    # os.environ.get() returns None otherwise and this concatenation would
    # raise TypeError -- confirm.
    subversion_config = os.environ.get("APPDATA") + "\\Subversion\\config"
  else:
    subversion_config = os.path.expanduser("~/.subversion/config")
  if not os.path.exists(subversion_config):
    return {}
  config = ConfigParser.ConfigParser()
  config.read(subversion_config)
  # Auto-props only apply when explicitly enabled in [miscellany].
  if (config.has_section("miscellany") and
      config.has_option("miscellany", "enable-auto-props") and
      config.getboolean("miscellany", "enable-auto-props") and
      config.has_section("auto-props")):
    props = {}
    for file_pattern in config.options("auto-props"):
      props[file_pattern] = ParseSubversionPropertyValues(
          config.get("auto-props", file_pattern))
    return props
  else:
    return {}
def ParseSubversionPropertyValues(props):
  """Parse the given property value which comes from [auto-props] section and
  returns a list whose element is a (svn_prop_key, svn_prop_value) pair.

  See the following doctest for example.

  >>> ParseSubversionPropertyValues('svn:eol-style=LF')
  [('svn:eol-style', 'LF')]
  >>> ParseSubversionPropertyValues('svn:mime-type=image/jpeg')
  [('svn:mime-type', 'image/jpeg')]
  >>> ParseSubversionPropertyValues('svn:eol-style=LF;svn:executable')
  [('svn:eol-style', 'LF'), ('svn:executable', '*')]
  """
  def to_pair(item):
    parts = item.split("=")
    assert len(parts) <= 2
    if len(parts) == 1:
      # If value is not given, use '*' as a Subversion's convention.
      return (parts[0], "*")
    return (parts[0], parts[1])

  return [to_pair(item) for item in props.split(";")]
def GetSubversionPropertyChanges(filename):
  """Return a Subversion's 'Property changes on ...' string, which is used in
  the patch file.

  Args:
    filename: filename whose property might be set by [auto-props] config.

  Returns:
    A string like 'Property changes on |filename| ...' if given |filename|
    matches any entries in [auto-props] section. None, otherwise.
  """
  global svn_auto_props_map
  if svn_auto_props_map is None:
    # Lazily load and cache the [auto-props] configuration.
    svn_auto_props_map = LoadSubversionAutoProperties()

  matched = []
  for pattern, props in svn_auto_props_map.items():
    if fnmatch.fnmatch(filename, pattern):
      matched.extend(props)
  return FormatSubversionPropertyChanges(filename, matched) if matched else None
def FormatSubversionPropertyChanges(filename, props):
  """Returns Subversion's 'Property changes on ...' strings using given filename
  and properties.

  Args:
    filename: filename
    props: A list whose element is a (svn_prop_key, svn_prop_value) pair.

  Returns:
    A string which can be used in the patch file for Subversion.

  See the following doctest for example.

  >>> print FormatSubversionPropertyChanges('foo.cc', [('svn:eol-style', 'LF')])
  Property changes on: foo.cc
  ___________________________________________________________________
  Added: svn:eol-style
   + LF
  <BLANKLINE>
  """
  out_lines = ["Property changes on: %s" % filename,
               "___________________________________________________________________"]
  for key, value in props:
    out_lines.extend(["Added: " + key, " + " + value])
  return "\n".join(out_lines) + "\n"
def RealMain(argv, data=None):
  """The real main function.

  Args:
    argv: Command line arguments.
    data: Diff contents. If None (default) the diff is generated by
      the VersionControlSystem implementation returned by GuessVCS().

  Returns:
    A 2-tuple (issue id, patchset id).
    The patchset id is None if the base files are not uploaded by this
    script (applies only to SVN checkouts).
  """
  logging.basicConfig(format=("%(asctime).19s %(levelname)s %(filename)s:"
                              "%(lineno)s %(message)s "))
  # Force a predictable locale so VCS command output parses consistently.
  os.environ['LC_ALL'] = 'C'
  options, args = parser.parse_args(argv[1:])
  global verbosity
  verbosity = options.verbose
  if verbosity >= 3:
    logging.getLogger().setLevel(logging.DEBUG)
  elif verbosity >= 2:
    logging.getLogger().setLevel(logging.INFO)

  vcs = GuessVCS(options)

  base = options.base_url
  if isinstance(vcs, SubversionVCS):
    # Guessing the base field is only supported for Subversion.
    # Note: Fetching base files may become deprecated in future releases.
    guessed_base = vcs.GuessBase(options.download_base)
    if base:
      if guessed_base and base != guessed_base:
        print "Using base URL \"%s\" from --base_url instead of \"%s\"" % \
            (base, guessed_base)
    else:
      base = guessed_base

  if not base and options.download_base:
    options.download_base = True
    logging.info("Enabled upload of base file")
  if not options.assume_yes:
    vcs.CheckForUnknownFiles()
  if data is None:
    data = vcs.GenerateDiff(args)
  data = vcs.PostProcessDiff(data)
  if options.print_diffs:
    print "Rietveld diff start:*****"
    print data
    print "Rietveld diff end:*****"
  files = vcs.GetBaseFiles(data)
  if verbosity >= 1:
    print "Upload server:", options.server, "(change with -s/--server)"
  if options.issue:
    prompt = "Message describing this patch set: "
  else:
    prompt = "New issue subject: "
  message = options.message or raw_input(prompt).strip()
  if not message:
    ErrorExit("A non-empty message is required")
  rpc_server = GetRpcServer(options.server,
                            options.email,
                            options.host,
                            options.save_cookies,
                            options.account_type)
  form_fields = [("subject", message)]
  if base:
    # Strip any credentials embedded in the base URL before sending it.
    b = urlparse.urlparse(base)
    username, netloc = urllib.splituser(b.netloc)
    if username:
      logging.info("Removed username from base URL")
      base = urlparse.urlunparse((b.scheme, netloc, b.path, b.params,
                                  b.query, b.fragment))
    form_fields.append(("base", base))
  if options.issue:
    form_fields.append(("issue", str(options.issue)))
  if options.email:
    form_fields.append(("user", options.email))
  if options.reviewers:
    for reviewer in options.reviewers.split(','):
      CheckReviewer(reviewer)
    form_fields.append(("reviewers", options.reviewers))
  if options.cc:
    for cc in options.cc.split(','):
      CheckReviewer(cc)
    form_fields.append(("cc", options.cc))
  description = options.description
  if options.description_file:
    if options.description:
      ErrorExit("Can't specify description and description_file")
    file = open(options.description_file, 'r')
    description = file.read()
    file.close()
  if description:
    form_fields.append(("description", description))
  # Send a hash of all the base file so the server can determine if a copy
  # already exists in an earlier patchset.
  base_hashes = ""
  for file, info in files.iteritems():
    if not info[0] is None:
      checksum = md5(info[0]).hexdigest()
      if base_hashes:
        base_hashes += "|"
      base_hashes += checksum + ":" + file
  form_fields.append(("base_hashes", base_hashes))
  if options.private:
    if options.issue:
      print "Warning: Private flag ignored when updating an existing issue."
    else:
      form_fields.append(("private", "1"))
  # If we're uploading base files, don't send the email before the uploads, so
  # that it contains the file status.
  if options.send_mail and options.download_base:
    form_fields.append(("send_mail", "1"))
  if not options.download_base:
    form_fields.append(("content_upload", "1"))
  if len(data) > MAX_UPLOAD_SIZE:
    print "Patch is large, so uploading file patches separately."
    uploaded_diff_file = []
    form_fields.append(("separate_patches", "1"))
  else:
    uploaded_diff_file = [("data", "data.diff", data)]
  ctype, body = EncodeMultipartFormData(form_fields, uploaded_diff_file)
  response_body = rpc_server.Send("/upload", body, content_type=ctype)
  patchset = None
  if not options.download_base or not uploaded_diff_file:
    # Server reply: message on line 1, patchset id on line 2, then one
    # "<patch key> <filename>" entry per line.
    lines = response_body.splitlines()
    if len(lines) >= 2:
      msg = lines[0]
      patchset = lines[1].strip()
      patches = [x.split(" ", 1) for x in lines[2:]]
    else:
      msg = response_body
  else:
    msg = response_body
  StatusUpdate(msg)
  if not response_body.startswith("Issue created.") and \
     not response_body.startswith("Issue updated."):
    sys.exit(0)
  # The issue id is the trailing path component of the reported URL.
  issue = msg[msg.rfind("/")+1:]

  if not uploaded_diff_file:
    result = UploadSeparatePatches(issue, rpc_server, patchset, data, options)
    if not options.download_base:
      patches = result

  if not options.download_base:
    vcs.UploadBaseFiles(issue, rpc_server, patches, patchset, options, files)

  if options.send_mail:
    rpc_server.Send("/" + issue + "/mail", payload="")
  return issue, patchset
def main():
  """Entry point: run RealMain(sys.argv) and exit cleanly on Ctrl-C."""
  try:
    RealMain(sys.argv)
  except KeyboardInterrupt:
    # Bare print emits a newline so the status message starts on its own line.
    print
    StatusUpdate("Interrupted.")
    sys.exit(1)


if __name__ == "__main__":
  main()
| Python |
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for setup.py file(s)."""
__author__ = 'tom.h.miller@gmail.com (Tom Miller)'
import sys
def is_missing(packages):
    """Return True if any of the given packages can't be imported.

    Args:
        packages: A module name (str) or a list of module names.

    Returns:
        True if at least one package fails to import, False when every
        package imports successfully.  (An empty list returns True,
        preserving the historical behavior of this helper.)
    """
    sys_path_original = sys.path[:]
    # Remove the current directory from the list of paths to check when
    # importing modules.
    try:
        # Sometimes it's represented by an empty string?
        sys.path.remove('')
    except ValueError:
        import os.path
        try:
            sys.path.remove(os.path.abspath(os.path.curdir))
        except ValueError:
            pass
    if not isinstance(packages, list):
        packages = [packages]
    retval = True
    try:
        for name in packages:
            try:
                __import__(name)
                retval = False
            except ImportError:
                # Bug fix: stop as soon as a package is MISSING.  The old
                # code broke out of the loop on the first successful import,
                # so a missing package later in the list was never reported.
                retval = True
                break
    finally:
        # Restore sys.path even if an unexpected exception escapes the loop.
        sys.path = sys_path_original
    return retval
| Python |
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from google.appengine.api import users
from google.appengine.ext import db
import apiclient.ext.appengine
import logging
import simple_wrapper
class Flow(db.Model):
    # Datastore entity holding an in-progress three-legged OAuth flow,
    # keyed by user_id while the user is redirected to the provider.
    flow = apiclient.ext.appengine.FlowThreeLeggedProperty()
class Credentials(db.Model):
    # Datastore entity holding the user's exchanged OAuth credentials,
    # keyed by user_id.
    credentials = apiclient.ext.appengine.OAuthCredentialsProperty()
class oauth_required(object):
  # Decorator class that forces the user through the three-legged OAuth
  # dance before the wrapped GET handler runs.

  def __init__(self, *decorator_args, **decorator_kwargs):
    """A decorator to require that a user has gone through the OAuth dance before accessing a handler.

    To use it, decorate your get() method like this:
      @oauth_required
      def get(self):
        buzz_wrapper = oauth_handlers.build_buzz_wrapper_for_current_user()
        user_profile_data = buzz_wrapper.get_profile()
        self.response.out.write('Hello, ' + user_profile_data.displayName)

    We will redirect the user to the OAuth endpoint and afterwards the OAuth
    will send the user back to the DanceFinishingHandler that you have configured.
    This should only used for GET requests since any payload in a POST request
    will be lost. Any parameters in the original URL will be preserved.
    """
    # Keyword arguments given to the decorator override both settings.py
    # values and the built-in defaults (see __load_settings__).
    self.decorator_args = decorator_args
    self.decorator_kwargs = decorator_kwargs

  def __load_settings_from_file__(self):
    # Load settings from settings.py module if it's available
    # Only return the keys that the user has explicitly set
    try:
      import settings
      # This uses getattr so that the user can set just the parameters they care about
      flow_settings = {
        'consumer_key' : getattr(settings, 'CONSUMER_KEY', None),
        'consumer_secret' : getattr(settings, 'CONSUMER_SECRET', None),
        'user_agent' : getattr(settings, 'USER_AGENT', None),
        'domain' : getattr(settings, 'DOMAIN', None),
        'scope' : getattr(settings, 'SCOPE', None),
        'xoauth_display_name' : getattr(settings, 'XOAUTH_DISPLAY_NAME', None)
      }
      # Strip out all the keys that weren't specified in the settings.py
      # This is needed to ensure that those keys don't override what's
      # specified in the decorator invocation
      cleaned_flow_settings = {}
      for key,value in flow_settings.items():
        if value is not None:
          cleaned_flow_settings[key] = value
      return cleaned_flow_settings
    except ImportError:
      return {}

  def __load_settings__(self):
    # Set up the default arguments and override them with whatever values have been given to the decorator
    flow_settings = {
      'consumer_key' : 'anonymous',
      'consumer_secret' : 'anonymous',
      'user_agent' : 'google-api-client-python-buzz-webapp/1.0',
      'domain' : 'anonymous',
      'scope' : 'https://www.googleapis.com/auth/buzz',
      'xoauth_display_name' : 'Default Display Name For OAuth Application'
    }
    logging.info('OAuth settings: %s ' % flow_settings)
    # Override the defaults with whatever the user may have put into settings.py
    settings_kwargs = self.__load_settings_from_file__()
    flow_settings.update(settings_kwargs)
    logging.info('OAuth settings: %s ' % flow_settings)
    # Override the defaults with whatever the user have specified in the decorator's invocation
    flow_settings.update(self.decorator_kwargs)
    logging.info('OAuth settings: %s ' % flow_settings)
    return flow_settings

  def __call__(self, handler_method):
    def check_oauth_credentials_wrapper(*args, **kwargs):
      handler_instance = args[0]
      # TODO(ade) Add support for POST requests
      # NOTE(review): 'webapp' is not among this module's visible imports;
      # confirm it is in scope before this raise can execute.
      if handler_instance.request.method != 'GET':
        raise webapp.Error('The check_oauth decorator can only be used for GET '
                           'requests')
      # Is this a request from the OAuth system after finishing the OAuth dance?
      if handler_instance.request.get('oauth_verifier'):
        user = users.get_current_user()
        logging.debug('Finished OAuth dance for: %s' % user.email())
        f = Flow.get_by_key_name(user.user_id())
        if f:
          credentials = f.flow.step2_exchange(handler_instance.request.params)
          c = Credentials(key_name=user.user_id(), credentials=credentials)
          c.put()
          # We delete the flow so that a malicious actor can't pretend to be the OAuth service
          # and replace a valid token with an invalid token
          f.delete()
        handler_method(*args)
        return
      # Find out who the user is. If we don't know who you are then we can't
      # look up your OAuth credentials thus we must ensure the user is logged in.
      user = users.get_current_user()
      if not user:
        handler_instance.redirect(users.create_login_url(handler_instance.request.uri))
        return
      # Now that we know who the user is look up their OAuth credentials
      # if we don't find the credentials then send them through the OAuth dance
      if not Credentials.get_by_key_name(user.user_id()):
        flow_settings = self.__load_settings__()
        # NOTE(review): apiclient.discovery / apiclient.oauth are used here
        # but only apiclient.ext.appengine is imported above -- confirm these
        # submodules are importable at this point.
        p = apiclient.discovery.build("buzz", "v1")
        flow = apiclient.oauth.FlowThreeLegged(p.auth_discovery(),
                       consumer_key=flow_settings['consumer_key'],
                       consumer_secret=flow_settings['consumer_secret'],
                       user_agent=flow_settings['user_agent'],
                       domain=flow_settings['domain'],
                       scope=flow_settings['scope'],
                       xoauth_displayname=flow_settings['xoauth_display_name'])
        # The OAuth system needs to send the user right back here so that they
        # get to the page they originally intended to visit.
        oauth_return_url = handler_instance.request.uri
        authorize_url = flow.step1_get_authorize_url(oauth_return_url)
        f = Flow(key_name=user.user_id(), flow=flow)
        f.put()
        handler_instance.redirect(authorize_url)
        return
      # If the user already has a token then call the wrapped handler
      # NOTE(review): kwargs accepted by this wrapper are not forwarded.
      handler_method(*args)
    return check_oauth_credentials_wrapper
def build_buzz_wrapper_for_current_user(api_key=None):
  """Build a SimpleWrapper bound to the current user's stored OAuth credentials.

  Args:
    api_key: Optional API key.  When absent, falls back to settings.API_KEY
      if a settings module is importable, otherwise no key is used.

  Returns:
    A simple_wrapper.SimpleWrapper authorized with the user's credentials.
  """
  user = users.get_current_user()
  credentials = Credentials.get_by_key_name(user.user_id()).credentials
  if not api_key:
    try:
      import settings
      api_key = getattr(settings, 'API_KEY', None)
    except ImportError:
      # Bug fix: the old code returned {} here, handing callers a plain dict
      # instead of a wrapper.  A missing settings module only means there is
      # no API key; build the wrapper without one.
      api_key = None
  return simple_wrapper.SimpleWrapper(api_key=api_key,
                                      credentials=credentials)
#!/usr/bin/python2.4
#
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'ade@google.com (Ade Oshineye)'
import apiclient.discovery
import httplib2
import logging
class SimpleWrapper(object):
    "Simple client that exposes the bare minimum set of common Buzz operations"

    def __init__(self, api_key=None, credentials=None):
        """Create a Buzz client, optionally authorized with OAuth credentials."""
        self.http = httplib2.Http()
        if credentials:
            logging.debug('Using api_client with credentials')
            self.http = credentials.authorize(self.http)
        else:
            logging.debug('Using api_client that doesn\'t have credentials')
        # developerKey identifies this application to the API frontend.
        self.api_client = apiclient.discovery.build('buzz', 'v1', http=self.http,
                                                    developerKey=api_key)

    def search(self, query, user_token=None, max_results=10):
        """Return activities matching |query|; None for a blank query.

        Returns [] when the response carries no 'items'.
        """
        # Bug fix: the old code compared with "is ''" (object identity),
        # which only worked because CPython happens to intern ''.
        if query is None or query.strip() == '':
            return None
        json = self.api_client.activities().search(q=query, max_results=max_results).execute()
        # 'in' replaces dict.has_key(), which was removed in Python 3.
        if 'items' in json:
            return json['items']
        return []

    def post(self, message_body, user_id='@me'):
        """Create a Buzz note with |message_body| and return its permalink URL.

        Returns None for a None or whitespace-only body.
        """
        # Bug fix: equality instead of "is ''" identity comparison.
        if message_body is None or message_body.strip() == '':
            return None
        activities = self.api_client.activities()
        logging.info('Retrieved activities for: %s' % user_id)
        activity = activities.insert(userId=user_id, body={
            'data' : {
                'title': message_body,
                'object': {
                    'content': message_body,
                    'type': 'note'}
            }
        }
        ).execute()
        url = activity['links']['alternate'][0]['href']
        logging.info('Just created: %s' % url)
        return url

    def get_profile(self, user_id='@me'):
        """Return the profile data for |user_id| (default: current user)."""
        user_profile_data = self.api_client.people().get(userId=user_id).execute()
        return user_profile_data

    def get_follower_count(self, user_id='@me'):
        """Return the number of followers of |user_id|, or -1 if unavailable."""
        return self.__get_group_count(user_id, '@followers')

    def get_following_count(self, user_id='@me'):
        """Return how many users |user_id| follows, or -1 if unavailable."""
        return self.__get_group_count(user_id, '@following')

    def __get_group_count(self, user_id, group_id):
        # Fetching 0 results is a performance optimisation that minimises the
        # amount of data that's getting retrieved from the server
        cmd = self.api_client.people().list(userId=user_id, groupId=group_id,
                                            max_results=0)
        members = cmd.execute()
        if 'totalResults' not in members:
            return -1
        return members['totalResults']
| Python |
#!/usr/bin/python2.4
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generate command-line samples from stubs.
Generates a command-line client sample application from a set of files
that contain only the relevant portions that change between each API.
This allows all the common code to go into a template.
Usage:
python sample_generator.py
Must be run from the root of the respository directory.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import os.path
import glob
import sys
import pprint
import string
import textwrap
# Generation runs entirely at import time: read the template once, then
# expand every stub under samples/src/ into samples/<name>/<name>.py.
if not os.path.isdir('samples/src'):
  sys.exit('Must be run from root of the respository directory.')

f = open('samples/src/template.tmpl', 'r')
template = string.Template(f.read())
f.close()

for filename in glob.glob('samples/src/*.py'):
  # Create a dictionary from the config file to later use in filling in the
  # templates.
  f = open(filename, 'r')
  contents = f.read()
  f.close()
  # Each stub begins with a header of "#key: value" lines, separated from
  # the body by the first blank line.
  config, content = contents.split('\n\n', 1)
  variables = {}
  for line in config.split('\n'):
    key, value = line[1:].split(':', 1)
    variables[key.strip()] = value.strip()

  # Indent non-empty body lines so the content nests inside the template;
  # blank lines stay blank to avoid trailing whitespace.
  lines = content.split('\n')
  outlines = []
  for l in lines:
    if l:
      outlines.append(' ' + l)
    else:
      outlines.append('')
  content = '\n'.join(outlines)

  # Wrap the description for readable line lengths in the generated file.
  variables['description'] = textwrap.fill(variables['description'])
  variables['content'] = content
  variables['name'] = os.path.basename(filename).split('.', 1)[0]

  f = open(os.path.join('samples', variables['name'], variables['name'] + '.py'), 'w')
  f.write(template.substitute(variables))
  f.close()
  print 'Processed: %s' % variables['name']
| Python |
import unittest
import doctest
class OptionalExtensionTestSuite(unittest.TestSuite):
    """TestSuite that runs every contained test twice: once with the default
    implementation and once with the C speedups explicitly disabled."""

    def run(self, result):
        import simplejson
        base_run = unittest.TestSuite.run
        # First pass: whatever implementation is currently active.
        base_run(self, result)
        # Second pass: force the pure-Python code paths.
        simplejson._toggle_speedups(False)
        base_run(self, result)
        # Restore the speedups for anything that runs after this suite.
        simplejson._toggle_speedups(True)
        return result
def additional_tests(suite=None):
    """Return |suite| (or a fresh TestSuite) extended with the simplejson
    doctest suites and the index.rst doc-file suite."""
    import simplejson
    import simplejson.encoder
    import simplejson.decoder
    target = suite if suite is not None else unittest.TestSuite()
    target.addTests(doctest.DocTestSuite(module)
                    for module in (simplejson, simplejson.encoder,
                                   simplejson.decoder))
    target.addTest(doctest.DocFileSuite('../../index.rst'))
    return target
def all_tests_suite():
    """Build the full simplejson regression suite, wrapped so every test
    runs both with and without the C speedups."""
    test_module_names = [
        'simplejson.tests.test_check_circular',
        'simplejson.tests.test_decode',
        'simplejson.tests.test_default',
        'simplejson.tests.test_dump',
        'simplejson.tests.test_encode_basestring_ascii',
        'simplejson.tests.test_encode_for_html',
        'simplejson.tests.test_fail',
        'simplejson.tests.test_float',
        'simplejson.tests.test_indent',
        'simplejson.tests.test_pass1',
        'simplejson.tests.test_pass2',
        'simplejson.tests.test_pass3',
        'simplejson.tests.test_recursion',
        'simplejson.tests.test_scanstring',
        'simplejson.tests.test_separators',
        'simplejson.tests.test_speedups',
        'simplejson.tests.test_unicode',
        'simplejson.tests.test_decimal',
    ]
    loaded = unittest.TestLoader().loadTestsFromNames(test_module_names)
    return OptionalExtensionTestSuite([additional_tests(loaded)])
def main():
    """Run the complete test suite with a plain text runner."""
    unittest.TextTestRunner().run(all_tests_suite())
if __name__ == '__main__':
    import os
    import sys
    # Put the repository root ahead of any installed simplejson so the
    # in-tree package under test is the one that gets imported.
    sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
    main()
| Python |
"""Implementation of JSONEncoder
"""
import re
from decimal import Decimal
def _import_speedups():
try:
from simplejson import _speedups
return _speedups.encode_basestring_ascii, _speedups.make_encoder
except ImportError:
return None, None
# C implementations when available; None forces the pure-Python fallbacks.
c_encode_basestring_ascii, c_make_encoder = _import_speedups()

from simplejson.decoder import PosInf

# Characters that must be escaped in a JSON string: control characters,
# backslash, double quote and the short-escape set.
ESCAPE = re.compile(r'[\x00-\x1f\\"\b\f\n\r\t]')
# For ASCII-only output: backslash/quote plus anything outside the
# printable ASCII range (space through '~').
ESCAPE_ASCII = re.compile(r'([\\"]|[^\ -~])')
# Detects bytes with the high bit set (possible UTF-8 sequences).
HAS_UTF8 = re.compile(r'[\x80-\xff]')
# Short escapes; the loop below fills in \u00xx forms for the remaining
# control characters not covered here.
ESCAPE_DCT = {
    '\\': '\\\\',
    '"': '\\"',
    '\b': '\\b',
    '\f': '\\f',
    '\n': '\\n',
    '\r': '\\r',
    '\t': '\\t',
}
for i in range(0x20):
    #ESCAPE_DCT.setdefault(chr(i), '\\u{0:04x}'.format(i))
    ESCAPE_DCT.setdefault(chr(i), '\\u%04x' % (i,))

# repr() yields the shortest round-trippable text for a float.
FLOAT_REPR = repr
def encode_basestring(s):
    """Return a JSON representation of a Python string

    Non-ASCII byte strings are first decoded from UTF-8 (Python 2 `str`),
    then the characters matched by ESCAPE are replaced via ESCAPE_DCT.
    """
    if isinstance(s, str) and HAS_UTF8.search(s) is not None:
        s = s.decode('utf-8')
    return u'"' + ESCAPE.sub(lambda m: ESCAPE_DCT[m.group(0)], s) + u'"'
def py_encode_basestring_ascii(s):
    """Return an ASCII-only JSON representation of a Python string

    Pure-Python fallback for the C implementation in _speedups: every
    non-ASCII code point is emitted as a \\uXXXX escape (a surrogate pair
    for code points beyond the BMP).
    """
    if isinstance(s, str) and HAS_UTF8.search(s) is not None:
        # Python 2: decode the byte string so escapes are computed per
        # code point rather than per byte.
        s = s.decode('utf-8')
    def replace(match):
        s = match.group(0)
        try:
            return ESCAPE_DCT[s]
        except KeyError:
            n = ord(s)
            if n < 0x10000:
                #return '\\u{0:04x}'.format(n)
                return '\\u%04x' % (n,)
            else:
                # surrogate pair
                n -= 0x10000
                s1 = 0xd800 | ((n >> 10) & 0x3ff)
                s2 = 0xdc00 | (n & 0x3ff)
                #return '\\u{0:04x}\\u{1:04x}'.format(s1, s2)
                return '\\u%04x\\u%04x' % (s1, s2)
    return '"' + str(ESCAPE_ASCII.sub(replace, s)) + '"'
# Prefer the C implementation when the _speedups extension is available.
encode_basestring_ascii = (
    c_encode_basestring_ascii or py_encode_basestring_ascii)
class JSONEncoder(object):
    """Extensible JSON <http://json.org> encoder for Python data structures.
    Supports the following objects and types by default:
    +-------------------+---------------+
    | Python | JSON |
    +===================+===============+
    | dict | object |
    +-------------------+---------------+
    | list, tuple | array |
    +-------------------+---------------+
    | str, unicode | string |
    +-------------------+---------------+
    | int, long, float | number |
    +-------------------+---------------+
    | True | true |
    +-------------------+---------------+
    | False | false |
    +-------------------+---------------+
    | None | null |
    +-------------------+---------------+
    To extend this to recognize other objects, subclass and implement a
    ``.default()`` method with another method that returns a serializable
    object for ``o`` if possible, otherwise it should call the superclass
    implementation (to raise ``TypeError``).
    """
    # Class-level defaults; overridden per-instance when ``separators`` is
    # passed to __init__.
    item_separator = ', '
    key_separator = ': '
    def __init__(self, skipkeys=False, ensure_ascii=True,
            check_circular=True, allow_nan=True, sort_keys=False,
            indent=None, separators=None, encoding='utf-8', default=None,
            use_decimal=False):
        """Constructor for JSONEncoder, with sensible defaults.
        If skipkeys is false, then it is a TypeError to attempt
        encoding of keys that are not str, int, long, float or None. If
        skipkeys is True, such items are simply skipped.
        If ensure_ascii is true, the output is guaranteed to be str
        objects with all incoming unicode characters escaped. If
        ensure_ascii is false, the output will be unicode object.
        If check_circular is true, then lists, dicts, and custom encoded
        objects will be checked for circular references during encoding to
        prevent an infinite recursion (which would cause an OverflowError).
        Otherwise, no such check takes place.
        If allow_nan is true, then NaN, Infinity, and -Infinity will be
        encoded as such. This behavior is not JSON specification compliant,
        but is consistent with most JavaScript based encoders and decoders.
        Otherwise, it will be a ValueError to encode such floats.
        If sort_keys is true, then the output of dictionaries will be
        sorted by key; this is useful for regression tests to ensure
        that JSON serializations can be compared on a day-to-day basis.
        If indent is a string, then JSON array elements and object members
        will be pretty-printed with a newline followed by that string repeated
        for each level of nesting. ``None`` (the default) selects the most compact
        representation without any newlines. For backwards compatibility with
        versions of simplejson earlier than 2.1.0, an integer is also accepted
        and is converted to a string with that many spaces.
        If specified, separators should be a (item_separator, key_separator)
        tuple. The default is (', ', ': '). To get the most compact JSON
        representation you should specify (',', ':') to eliminate whitespace.
        If specified, default is a function that gets called for objects
        that can't otherwise be serialized. It should return a JSON encodable
        version of the object or raise a ``TypeError``.
        If encoding is not None, then all input strings will be
        transformed into unicode using that encoding prior to JSON-encoding.
        The default is UTF-8.
        If use_decimal is true (not the default), ``decimal.Decimal`` will
        be supported directly by the encoder. For the inverse, decode JSON
        with ``parse_float=decimal.Decimal``.
        """
        self.skipkeys = skipkeys
        self.ensure_ascii = ensure_ascii
        self.check_circular = check_circular
        self.allow_nan = allow_nan
        self.sort_keys = sort_keys
        self.use_decimal = use_decimal
        # Pre-2.1.0 compatibility: an integer indent means that many spaces.
        if isinstance(indent, (int, long)):
            indent = ' ' * indent
        self.indent = indent
        if separators is not None:
            self.item_separator, self.key_separator = separators
        if default is not None:
            # Shadows the default() method below for this instance only.
            self.default = default
        self.encoding = encoding
    def default(self, o):
        """Implement this method in a subclass such that it returns
        a serializable object for ``o``, or calls the base implementation
        (to raise a ``TypeError``).
        For example, to support arbitrary iterators, you could
        implement default like this::
            def default(self, o):
                try:
                    iterable = iter(o)
                except TypeError:
                    pass
                else:
                    return list(iterable)
                return JSONEncoder.default(self, o)
        """
        raise TypeError(repr(o) + " is not JSON serializable")
    def encode(self, o):
        """Return a JSON string representation of a Python data structure.
        >>> from simplejson import JSONEncoder
        >>> JSONEncoder().encode({"foo": ["bar", "baz"]})
        '{"foo": ["bar", "baz"]}'
        """
        # This is for extremely simple cases and benchmarks.
        if isinstance(o, basestring):
            if isinstance(o, str):
                _encoding = self.encoding
                # Promote byte strings to unicode unless they are already
                # in the default utf-8 encoding.
                if (_encoding is not None
                        and not (_encoding == 'utf-8')):
                    o = o.decode(_encoding)
            if self.ensure_ascii:
                return encode_basestring_ascii(o)
            else:
                return encode_basestring(o)
        # This doesn't pass the iterator directly to ''.join() because the
        # exceptions aren't as detailed.  The list call should be roughly
        # equivalent to the PySequence_Fast that ''.join() would do.
        chunks = self.iterencode(o, _one_shot=True)
        if not isinstance(chunks, (list, tuple)):
            chunks = list(chunks)
        if self.ensure_ascii:
            return ''.join(chunks)
        else:
            return u''.join(chunks)
    def iterencode(self, o, _one_shot=False):
        """Encode the given object and yield each string
        representation as available.
        For example::
            for chunk in JSONEncoder().iterencode(bigobject):
                mysocket.write(chunk)
        """
        # markers tracks container ids currently being encoded, to detect
        # circular references; None disables the check entirely.
        if self.check_circular:
            markers = {}
        else:
            markers = None
        if self.ensure_ascii:
            _encoder = encode_basestring_ascii
        else:
            _encoder = encode_basestring
        if self.encoding != 'utf-8':
            # Wrap the chosen encoder so byte strings are decoded with the
            # configured (non-utf-8) encoding first.
            def _encoder(o, _orig_encoder=_encoder, _encoding=self.encoding):
                if isinstance(o, str):
                    o = o.decode(_encoding)
                return _orig_encoder(o)
        def floatstr(o, allow_nan=self.allow_nan,
                _repr=FLOAT_REPR, _inf=PosInf, _neginf=-PosInf):
            # Check for specials. Note that this type of test is processor
            # and/or platform-specific, so do tests which don't depend on
            # the internals.
            if o != o:
                text = 'NaN'
            elif o == _inf:
                text = 'Infinity'
            elif o == _neginf:
                text = '-Infinity'
            else:
                return _repr(o)
            if not allow_nan:
                raise ValueError(
                    "Out of range float values are not JSON compliant: " +
                    repr(o))
            return text
        # key_memo de-duplicates encoded dict keys inside the C encoder.
        key_memo = {}
        # The C encoder only supports the compact, unsorted, one-shot case.
        if (_one_shot and c_make_encoder is not None
                and not self.indent and not self.sort_keys):
            _iterencode = c_make_encoder(
                markers, self.default, _encoder, self.indent,
                self.key_separator, self.item_separator, self.sort_keys,
                self.skipkeys, self.allow_nan, key_memo, self.use_decimal)
        else:
            _iterencode = _make_iterencode(
                markers, self.default, _encoder, self.indent, floatstr,
                self.key_separator, self.item_separator, self.sort_keys,
                self.skipkeys, _one_shot, self.use_decimal)
        try:
            return _iterencode(o, 0)
        finally:
            # Don't keep encoded keys alive across calls.
            key_memo.clear()
class JSONEncoderForHTML(JSONEncoder):
    """An encoder that produces JSON safe to embed in HTML.
    To embed JSON content in, say, a script tag on a web page, the
    characters &, < and > should be escaped. They cannot be escaped
    with the usual entities (e.g. &amp;) because they are not expanded
    within <script> tags.
    """
    # Characters that must be escaped for safe embedding, mapped to their
    # \uXXXX JSON escape sequences.
    def encode(self, o):
        # Always stream through iterencode (never the parent's string fast
        # paths) so the HTML escaping below is applied to every chunk.
        chunks = self.iterencode(o, True)
        joiner = ''.join if self.ensure_ascii else u''.join
        return joiner(chunks)
    def iterencode(self, o, _one_shot=False):
        parent_chunks = super(JSONEncoderForHTML, self).iterencode(o, _one_shot)
        for chunk in parent_chunks:
            for raw, escaped in (('&', '\\u0026'),
                                 ('<', '\\u003c'),
                                 ('>', '\\u003e')):
                chunk = chunk.replace(raw, escaped)
            yield chunk
# Build the pure-Python iterencode closure.  All hot-path globals (including
# the builtins below) are passed in as default arguments so the generated
# bytecode uses fast local lookups; this is Python 2 only (``True=True`` is a
# syntax error on Python 3).
def _make_iterencode(markers, _default, _encoder, _indent, _floatstr,
        _key_separator, _item_separator, _sort_keys, _skipkeys, _one_shot,
        _use_decimal,
        ## HACK: hand-optimized bytecode; turn globals into locals
        False=False,
        True=True,
        ValueError=ValueError,
        basestring=basestring,
        Decimal=Decimal,
        dict=dict,
        float=float,
        id=id,
        int=int,
        isinstance=isinstance,
        list=list,
        long=long,
        str=str,
        tuple=tuple,
        ):
    # Yield the chunks of a JSON array for ``lst``.
    def _iterencode_list(lst, _current_indent_level):
        if not lst:
            yield '[]'
            return
        if markers is not None:
            # Circular-reference detection: key by object id for the
            # duration of this container's encoding.
            markerid = id(lst)
            if markerid in markers:
                raise ValueError("Circular reference detected")
            markers[markerid] = lst
        buf = '['
        if _indent is not None:
            _current_indent_level += 1
            newline_indent = '\n' + (_indent * _current_indent_level)
            separator = _item_separator + newline_indent
            buf += newline_indent
        else:
            newline_indent = None
            separator = _item_separator
        first = True
        for value in lst:
            if first:
                first = False
            else:
                # After the first element, each chunk is prefixed with the
                # separator instead of the opening bracket.
                buf = separator
            if isinstance(value, basestring):
                yield buf + _encoder(value)
            elif value is None:
                yield buf + 'null'
            elif value is True:
                yield buf + 'true'
            elif value is False:
                yield buf + 'false'
            elif isinstance(value, (int, long)):
                yield buf + str(value)
            elif isinstance(value, float):
                yield buf + _floatstr(value)
            elif _use_decimal and isinstance(value, Decimal):
                yield buf + str(value)
            else:
                # Containers and unknown objects: emit the prefix, then
                # delegate to the appropriate sub-generator.
                yield buf
                if isinstance(value, (list, tuple)):
                    chunks = _iterencode_list(value, _current_indent_level)
                elif isinstance(value, dict):
                    chunks = _iterencode_dict(value, _current_indent_level)
                else:
                    chunks = _iterencode(value, _current_indent_level)
                for chunk in chunks:
                    yield chunk
        if newline_indent is not None:
            _current_indent_level -= 1
            yield '\n' + (_indent * _current_indent_level)
        yield ']'
        if markers is not None:
            del markers[markerid]
    # Yield the chunks of a JSON object for ``dct``.
    def _iterencode_dict(dct, _current_indent_level):
        if not dct:
            yield '{}'
            return
        if markers is not None:
            markerid = id(dct)
            if markerid in markers:
                raise ValueError("Circular reference detected")
            markers[markerid] = dct
        yield '{'
        if _indent is not None:
            _current_indent_level += 1
            newline_indent = '\n' + (_indent * _current_indent_level)
            item_separator = _item_separator + newline_indent
            yield newline_indent
        else:
            newline_indent = None
            item_separator = _item_separator
        first = True
        if _sort_keys:
            items = dct.items()
            items.sort(key=lambda kv: kv[0])
        else:
            items = dct.iteritems()
        for key, value in items:
            if isinstance(key, basestring):
                pass
            # JavaScript is weakly typed for these, so it makes sense to
            # also allow them.  Many encoders seem to do something like this.
            elif isinstance(key, float):
                key = _floatstr(key)
            elif key is True:
                key = 'true'
            elif key is False:
                key = 'false'
            elif key is None:
                key = 'null'
            elif isinstance(key, (int, long)):
                key = str(key)
            elif _skipkeys:
                continue
            else:
                raise TypeError("key " + repr(key) + " is not a string")
            if first:
                first = False
            else:
                yield item_separator
            yield _encoder(key)
            yield _key_separator
            if isinstance(value, basestring):
                yield _encoder(value)
            elif value is None:
                yield 'null'
            elif value is True:
                yield 'true'
            elif value is False:
                yield 'false'
            elif isinstance(value, (int, long)):
                yield str(value)
            elif isinstance(value, float):
                yield _floatstr(value)
            elif _use_decimal and isinstance(value, Decimal):
                yield str(value)
            else:
                if isinstance(value, (list, tuple)):
                    chunks = _iterencode_list(value, _current_indent_level)
                elif isinstance(value, dict):
                    chunks = _iterencode_dict(value, _current_indent_level)
                else:
                    chunks = _iterencode(value, _current_indent_level)
                for chunk in chunks:
                    yield chunk
        if newline_indent is not None:
            _current_indent_level -= 1
            yield '\n' + (_indent * _current_indent_level)
        yield '}'
        if markers is not None:
            del markers[markerid]
    # Top-level dispatcher: scalars are yielded directly, containers are
    # delegated, and anything else goes through _default() exactly once.
    def _iterencode(o, _current_indent_level):
        if isinstance(o, basestring):
            yield _encoder(o)
        elif o is None:
            yield 'null'
        elif o is True:
            yield 'true'
        elif o is False:
            yield 'false'
        elif isinstance(o, (int, long)):
            yield str(o)
        elif isinstance(o, float):
            yield _floatstr(o)
        elif isinstance(o, (list, tuple)):
            for chunk in _iterencode_list(o, _current_indent_level):
                yield chunk
        elif isinstance(o, dict):
            for chunk in _iterencode_dict(o, _current_indent_level):
                yield chunk
        elif _use_decimal and isinstance(o, Decimal):
            yield str(o)
        else:
            if markers is not None:
                markerid = id(o)
                if markerid in markers:
                    raise ValueError("Circular reference detected")
                markers[markerid] = o
            # Let default() convert the object, then re-encode the result.
            o = _default(o)
            for chunk in _iterencode(o, _current_indent_level):
                yield chunk
            if markers is not None:
                del markers[markerid]
    return _iterencode
| Python |
"""Implementation of JSONDecoder
"""
import re
import sys
import struct
from simplejson.scanner import make_scanner
def _import_c_scanstring():
try:
from simplejson._speedups import scanstring
return scanstring
except ImportError:
return None
c_scanstring = _import_c_scanstring()
__all__ = ['JSONDecoder']
# Regex flags shared by every decoder pattern in this module.
FLAGS = re.VERBOSE | re.MULTILINE | re.DOTALL
def _floatconstants():
_BYTES = '7FF80000000000007FF0000000000000'.decode('hex')
# The struct module in Python 2.4 would get frexp() out of range here
# when an endian is specified in the format string. Fixed in Python 2.5+
if sys.byteorder != 'big':
_BYTES = _BYTES[:8][::-1] + _BYTES[8:][::-1]
nan, inf = struct.unpack('dd', _BYTES)
return nan, inf, -inf
NaN, PosInf, NegInf = _floatconstants()
class JSONDecodeError(ValueError):
    """Subclass of ValueError with the following additional properties:
    msg: The unformatted error message
    doc: The JSON document being parsed
    pos: The start index of doc where parsing failed
    end: The end index of doc where parsing failed (may be None)
    lineno: The line corresponding to pos
    colno: The column corresponding to pos
    endlineno: The line corresponding to end (may be None)
    endcolno: The column corresponding to end (may be None)
    """
    def __init__(self, msg, doc, pos, end=None):
        ValueError.__init__(self, errmsg(msg, doc, pos, end=end))
        self.msg = msg
        self.doc = doc
        self.pos = pos
        self.end = end
        self.lineno, self.colno = linecol(doc, pos)
        if end is not None:
            # Bug fix: endlineno/endcolno must be derived from ``end``;
            # the original passed ``pos`` twice, so the end coordinates
            # always duplicated the start coordinates.
            self.endlineno, self.endcolno = linecol(doc, end)
        else:
            self.endlineno, self.endcolno = None, None
def linecol(doc, pos):
    """Return the (1-based) line number and column of offset *pos* in *doc*.

    On the first line the column equals *pos* itself; on subsequent lines it
    is measured from the character after the preceding newline.
    """
    lineno = doc.count('\n', 0, pos) + 1
    if lineno == 1:
        return lineno, pos
    return lineno, pos - doc.rindex('\n', 0, pos)
def errmsg(msg, doc, pos, end=None):
    """Format a decode-error message with line/column (and range) details.

    Note that this function is called from _speedups as well, so its
    signature must not change.
    """
    lineno, colno = linecol(doc, pos)
    if end is None:
        return '%s: line %d column %d (char %d)' % (msg, lineno, colno, pos)
    endlineno, endcolno = linecol(doc, end)
    return ('%s: line %d column %d - line %d column %d (char %d - %d)'
            % (msg, lineno, colno, endlineno, endcolno, pos, end))
# Literal tokens that are valid in this decoder but outside the JSON spec.
_CONSTANTS = {
    '-Infinity': NegInf,
    'Infinity': PosInf,
    'NaN': NaN,
}
# Matches a run of ordinary characters followed by '"', '\' or a control char.
STRINGCHUNK = re.compile(r'(.*?)(["\\\x00-\x1f])', FLAGS)
# Single-character JSON backslash escapes and their unescaped values.
BACKSLASH = {
    '"': u'"', '\\': u'\\', '/': u'/',
    'b': u'\b', 'f': u'\f', 'n': u'\n', 'r': u'\r', 't': u'\t',
}
# Encoding assumed for byte-string documents when none is specified.
DEFAULT_ENCODING = "utf-8"
def py_scanstring(s, end, encoding=None, strict=True,
        _b=BACKSLASH, _m=STRINGCHUNK.match):
    """Scan the string s for a JSON string. End is the index of the
    character in s after the quote that started the JSON string.
    Unescapes all valid JSON string escape sequences and raises ValueError
    on attempt to decode an invalid string. If strict is False then literal
    control characters are allowed in the string.
    Returns a tuple of the decoded string and the index of the character in s
    after the end quote."""
    if encoding is None:
        encoding = DEFAULT_ENCODING
    chunks = []
    _append = chunks.append
    # begin points at the opening quote, for error reporting only.
    begin = end - 1
    while 1:
        chunk = _m(s, end)
        if chunk is None:
            raise JSONDecodeError(
                "Unterminated string starting at", s, begin)
        end = chunk.end()
        content, terminator = chunk.groups()
        # Content is contains zero or more unescaped string characters
        if content:
            if not isinstance(content, unicode):
                content = unicode(content, encoding)
            _append(content)
        # Terminator is the end of string, a literal control character,
        # or a backslash denoting that an escape sequence follows
        if terminator == '"':
            break
        elif terminator != '\\':
            if strict:
                msg = "Invalid control character %r at" % (terminator,)
                #msg = "Invalid control character {0!r} at".format(terminator)
                raise JSONDecodeError(msg, s, end)
            else:
                # Non-strict mode: keep the raw control character.
                _append(terminator)
                continue
        try:
            esc = s[end]
        except IndexError:
            raise JSONDecodeError(
                "Unterminated string starting at", s, begin)
        # If not a unicode escape sequence, must be in the lookup table
        if esc != 'u':
            try:
                char = _b[esc]
            except KeyError:
                msg = "Invalid \\escape: " + repr(esc)
                raise JSONDecodeError(msg, s, end)
            end += 1
        else:
            # Unicode escape sequence
            esc = s[end + 1:end + 5]
            next_end = end + 5
            if len(esc) != 4:
                msg = "Invalid \\uXXXX escape"
                raise JSONDecodeError(msg, s, end)
            uni = int(esc, 16)
            # Check for surrogate pair on UCS-4 systems
            if 0xd800 <= uni <= 0xdbff and sys.maxunicode > 65535:
                msg = "Invalid \\uXXXX\\uXXXX surrogate pair"
                if not s[end + 5:end + 7] == '\\u':
                    raise JSONDecodeError(msg, s, end)
                esc2 = s[end + 7:end + 11]
                if len(esc2) != 4:
                    raise JSONDecodeError(msg, s, end)
                uni2 = int(esc2, 16)
                # Combine the high and low surrogate into one code point.
                uni = 0x10000 + (((uni - 0xd800) << 10) | (uni2 - 0xdc00))
                next_end += 6
            char = unichr(uni)
            end = next_end
        # Append the unescaped character
        _append(char)
    return u''.join(chunks), end
# Use speedup if available
scanstring = c_scanstring or py_scanstring
# Matches a (possibly empty) run of JSON whitespace.
WHITESPACE = re.compile(r'[ \t\n\r]*', FLAGS)
WHITESPACE_STR = ' \t\n\r'
# NOTE: the tuple parameter ``(s, end)`` is Python 2 only syntax.
def JSONObject((s, end), encoding, strict, scan_once, object_hook,
        object_pairs_hook, memo=None,
        _w=WHITESPACE.match, _ws=WHITESPACE_STR):
    """Parse a JSON object.  ``end`` is the index just after the opening
    '{'.  Returns ``(obj, end)`` where ``end`` is the index after the
    closing '}'."""
    # Backwards compatibility
    if memo is None:
        memo = {}
    # memo interns repeated key strings so duplicates share one object.
    memo_get = memo.setdefault
    pairs = []
    # Use a slice to prevent IndexError from being raised, the following
    # check will raise a more specific ValueError if the string is empty
    nextchar = s[end:end + 1]
    # Normally we expect nextchar == '"'
    if nextchar != '"':
        if nextchar in _ws:
            end = _w(s, end).end()
            nextchar = s[end:end + 1]
        # Trivial empty object
        if nextchar == '}':
            if object_pairs_hook is not None:
                result = object_pairs_hook(pairs)
                return result, end
            pairs = {}
            if object_hook is not None:
                pairs = object_hook(pairs)
            return pairs, end + 1
        elif nextchar != '"':
            raise JSONDecodeError("Expecting property name", s, end)
    end += 1
    while True:
        key, end = scanstring(s, end, encoding, strict)
        key = memo_get(key, key)
        # To skip some function call overhead we optimize the fast paths where
        # the JSON key separator is ": " or just ":".
        if s[end:end + 1] != ':':
            end = _w(s, end).end()
            if s[end:end + 1] != ':':
                raise JSONDecodeError("Expecting : delimiter", s, end)
        end += 1
        try:
            if s[end] in _ws:
                end += 1
                if s[end] in _ws:
                    end = _w(s, end + 1).end()
        except IndexError:
            pass
        try:
            value, end = scan_once(s, end)
        except StopIteration:
            raise JSONDecodeError("Expecting object", s, end)
        pairs.append((key, value))
        # Skip whitespace, then expect ',' (more pairs) or '}' (done).
        try:
            nextchar = s[end]
            if nextchar in _ws:
                end = _w(s, end + 1).end()
                nextchar = s[end]
        except IndexError:
            nextchar = ''
        end += 1
        if nextchar == '}':
            break
        elif nextchar != ',':
            raise JSONDecodeError("Expecting , delimiter", s, end - 1)
        # Skip whitespace before the next property name.
        try:
            nextchar = s[end]
            if nextchar in _ws:
                end += 1
                nextchar = s[end]
                if nextchar in _ws:
                    end = _w(s, end + 1).end()
                    nextchar = s[end]
        except IndexError:
            nextchar = ''
        end += 1
        if nextchar != '"':
            raise JSONDecodeError("Expecting property name", s, end - 1)
    if object_pairs_hook is not None:
        result = object_pairs_hook(pairs)
        return result, end
    pairs = dict(pairs)
    if object_hook is not None:
        pairs = object_hook(pairs)
    return pairs, end
# NOTE: the tuple parameter ``(s, end)`` is Python 2 only syntax.
def JSONArray((s, end), scan_once, _w=WHITESPACE.match, _ws=WHITESPACE_STR):
    """Parse a JSON array.  ``end`` is the index just after the opening
    '['.  Returns ``(values, end)`` where ``end`` is the index after the
    closing ']'."""
    values = []
    nextchar = s[end:end + 1]
    if nextchar in _ws:
        end = _w(s, end + 1).end()
        nextchar = s[end:end + 1]
    # Look-ahead for trivial empty array
    if nextchar == ']':
        return values, end + 1
    _append = values.append
    while True:
        try:
            value, end = scan_once(s, end)
        except StopIteration:
            raise JSONDecodeError("Expecting object", s, end)
        _append(value)
        # Skip whitespace, then expect ',' (more values) or ']' (done).
        nextchar = s[end:end + 1]
        if nextchar in _ws:
            end = _w(s, end + 1).end()
            nextchar = s[end:end + 1]
        end += 1
        if nextchar == ']':
            break
        elif nextchar != ',':
            raise JSONDecodeError("Expecting , delimiter", s, end)
        try:
            if s[end] in _ws:
                end += 1
                if s[end] in _ws:
                    end = _w(s, end + 1).end()
        except IndexError:
            pass
    return values, end
class JSONDecoder(object):
    """Simple JSON <http://json.org> decoder
    Performs the following translations in decoding by default:
    +---------------+-------------------+
    | JSON | Python |
    +===============+===================+
    | object | dict |
    +---------------+-------------------+
    | array | list |
    +---------------+-------------------+
    | string | unicode |
    +---------------+-------------------+
    | number (int) | int, long |
    +---------------+-------------------+
    | number (real) | float |
    +---------------+-------------------+
    | true | True |
    +---------------+-------------------+
    | false | False |
    +---------------+-------------------+
    | null | None |
    +---------------+-------------------+
    It also understands ``NaN``, ``Infinity``, and ``-Infinity`` as
    their corresponding ``float`` values, which is outside the JSON spec.
    """
    def __init__(self, encoding=None, object_hook=None, parse_float=None,
            parse_int=None, parse_constant=None, strict=True,
            object_pairs_hook=None):
        """
        *encoding* determines the encoding used to interpret any
        :class:`str` objects decoded by this instance (``'utf-8'`` by
        default).  It has no effect when decoding :class:`unicode` objects.
        Note that currently only encodings that are a superset of ASCII work,
        strings of other encodings should be passed in as :class:`unicode`.
        *object_hook*, if specified, will be called with the result of every
        JSON object decoded and its return value will be used in place of the
        given :class:`dict`.  This can be used to provide custom
        deserializations (e.g. to support JSON-RPC class hinting).
        *object_pairs_hook* is an optional function that will be called with
        the result of any object literal decode with an ordered list of pairs.
        The return value of *object_pairs_hook* will be used instead of the
        :class:`dict`.  This feature can be used to implement custom decoders
        that rely on the order that the key and value pairs are decoded (for
        example, :func:`collections.OrderedDict` will remember the order of
        insertion). If *object_hook* is also defined, the *object_pairs_hook*
        takes priority.
        *parse_float*, if specified, will be called with the string of every
        JSON float to be decoded.  By default, this is equivalent to
        ``float(num_str)``. This can be used to use another datatype or parser
        for JSON floats (e.g. :class:`decimal.Decimal`).
        *parse_int*, if specified, will be called with the string of every
        JSON int to be decoded.  By default, this is equivalent to
        ``int(num_str)``.  This can be used to use another datatype or parser
        for JSON integers (e.g. :class:`float`).
        *parse_constant*, if specified, will be called with one of the
        following strings: ``'-Infinity'``, ``'Infinity'``, ``'NaN'``.  This
        can be used to raise an exception if invalid JSON numbers are
        encountered.
        *strict* controls the parser's behavior when it encounters an
        invalid control character in a string. The default setting of
        ``True`` means that unescaped control characters are parse errors, if
        ``False`` then control characters will be allowed in strings.
        """
        self.encoding = encoding
        self.object_hook = object_hook
        self.object_pairs_hook = object_pairs_hook
        self.parse_float = parse_float or float
        self.parse_int = parse_int or int
        self.parse_constant = parse_constant or _CONSTANTS.__getitem__
        self.strict = strict
        # Parser callbacks consumed by make_scanner().
        self.parse_object = JSONObject
        self.parse_array = JSONArray
        self.parse_string = scanstring
        # memo interns repeated object keys across the whole document.
        self.memo = {}
        self.scan_once = make_scanner(self)
    def decode(self, s, _w=WHITESPACE.match):
        """Return the Python representation of ``s`` (a ``str`` or ``unicode``
        instance containing a JSON document)
        """
        obj, end = self.raw_decode(s, idx=_w(s, 0).end())
        end = _w(s, end).end()
        # Anything left over after the document (and trailing whitespace)
        # is an error.
        if end != len(s):
            raise JSONDecodeError("Extra data", s, end, len(s))
        return obj
    def raw_decode(self, s, idx=0):
        """Decode a JSON document from ``s`` (a ``str`` or ``unicode``
        beginning with a JSON document) and return a 2-tuple of the Python
        representation and the index in ``s`` where the document ended.
        This can be used to decode a JSON document from a string that may
        have extraneous data at the end.
        """
        try:
            obj, end = self.scan_once(s, idx)
        except StopIteration:
            raise JSONDecodeError("No JSON object could be decoded", s, idx)
        return obj, end
| Python |
r"""JSON (JavaScript Object Notation) <http://json.org> is a subset of
JavaScript syntax (ECMA-262 3rd edition) used as a lightweight data
interchange format.
:mod:`simplejson` exposes an API familiar to users of the standard library
:mod:`marshal` and :mod:`pickle` modules. It is the externally maintained
version of the :mod:`json` library contained in Python 2.6, but maintains
compatibility with Python 2.4 and Python 2.5 and (currently) has
significant performance advantages, even without using the optional C
extension for speedups.
Encoding basic Python object hierarchies::
>>> import simplejson as json
>>> json.dumps(['foo', {'bar': ('baz', None, 1.0, 2)}])
'["foo", {"bar": ["baz", null, 1.0, 2]}]'
>>> print json.dumps("\"foo\bar")
"\"foo\bar"
>>> print json.dumps(u'\u1234')
"\u1234"
>>> print json.dumps('\\')
"\\"
>>> print json.dumps({"c": 0, "b": 0, "a": 0}, sort_keys=True)
{"a": 0, "b": 0, "c": 0}
>>> from StringIO import StringIO
>>> io = StringIO()
>>> json.dump(['streaming API'], io)
>>> io.getvalue()
'["streaming API"]'
Compact encoding::
>>> import simplejson as json
>>> json.dumps([1,2,3,{'4': 5, '6': 7}], separators=(',',':'))
'[1,2,3,{"4":5,"6":7}]'
Pretty printing::
>>> import simplejson as json
>>> s = json.dumps({'4': 5, '6': 7}, sort_keys=True, indent=' ')
>>> print '\n'.join([l.rstrip() for l in s.splitlines()])
{
"4": 5,
"6": 7
}
Decoding JSON::
>>> import simplejson as json
>>> obj = [u'foo', {u'bar': [u'baz', None, 1.0, 2]}]
>>> json.loads('["foo", {"bar":["baz", null, 1.0, 2]}]') == obj
True
>>> json.loads('"\\"foo\\bar"') == u'"foo\x08ar'
True
>>> from StringIO import StringIO
>>> io = StringIO('["streaming API"]')
>>> json.load(io)[0] == 'streaming API'
True
Specializing JSON object decoding::
>>> import simplejson as json
>>> def as_complex(dct):
... if '__complex__' in dct:
... return complex(dct['real'], dct['imag'])
... return dct
...
>>> json.loads('{"__complex__": true, "real": 1, "imag": 2}',
... object_hook=as_complex)
(1+2j)
>>> from decimal import Decimal
>>> json.loads('1.1', parse_float=Decimal) == Decimal('1.1')
True
Specializing JSON object encoding::
>>> import simplejson as json
>>> def encode_complex(obj):
... if isinstance(obj, complex):
... return [obj.real, obj.imag]
... raise TypeError(repr(o) + " is not JSON serializable")
...
>>> json.dumps(2 + 1j, default=encode_complex)
'[2.0, 1.0]'
>>> json.JSONEncoder(default=encode_complex).encode(2 + 1j)
'[2.0, 1.0]'
>>> ''.join(json.JSONEncoder(default=encode_complex).iterencode(2 + 1j))
'[2.0, 1.0]'
Using simplejson.tool from the shell to validate and pretty-print::
$ echo '{"json":"obj"}' | python -m simplejson.tool
{
"json": "obj"
}
$ echo '{ 1.2:3.4}' | python -m simplejson.tool
Expecting property name: line 1 column 2 (char 2)
"""
__version__ = '2.1.1'
# Public API of the simplejson package.
__all__ = [
    'dump', 'dumps', 'load', 'loads',
    'JSONDecoder', 'JSONDecodeError', 'JSONEncoder',
    'OrderedDict',
]
__author__ = 'Bob Ippolito <bob@redivi.com>'
from decimal import Decimal
from decoder import JSONDecoder, JSONDecodeError
from encoder import JSONEncoder
def _import_OrderedDict():
import collections
try:
return collections.OrderedDict
except AttributeError:
import ordered_dict
return ordered_dict.OrderedDict
OrderedDict = _import_OrderedDict()
def _import_c_make_encoder():
try:
from simplejson._speedups import make_encoder
return make_encoder
except ImportError:
return None
# Shared encoder reused by dump()/dumps() whenever every argument is at its
# default, avoiding a JSONEncoder construction per call.
_default_encoder = JSONEncoder(
    skipkeys=False,
    ensure_ascii=True,
    check_circular=True,
    allow_nan=True,
    indent=None,
    separators=None,
    encoding='utf-8',
    default=None,
    use_decimal=False,
)
def dump(obj, fp, skipkeys=False, ensure_ascii=True, check_circular=True,
        allow_nan=True, cls=None, indent=None, separators=None,
        encoding='utf-8', default=None, use_decimal=False, **kw):
    """Serialize ``obj`` as a JSON formatted stream to ``fp`` (a
    ``.write()``-supporting file-like object).
    If ``skipkeys`` is true then ``dict`` keys that are not basic types
    (``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
    will be skipped instead of raising a ``TypeError``.
    If ``ensure_ascii`` is false, then the some chunks written to ``fp``
    may be ``unicode`` instances, subject to normal Python ``str`` to
    ``unicode`` coercion rules. Unless ``fp.write()`` explicitly
    understands ``unicode`` (as in ``codecs.getwriter()``) this is likely
    to cause an error.
    If ``check_circular`` is false, then the circular reference check
    for container types will be skipped and a circular reference will
    result in an ``OverflowError`` (or worse).
    If ``allow_nan`` is false, then it will be a ``ValueError`` to
    serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``)
    in strict compliance of the JSON specification, instead of using the
    JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).
    If *indent* is a string, then JSON array elements and object members
    will be pretty-printed with a newline followed by that string repeated
    for each level of nesting. ``None`` (the default) selects the most compact
    representation without any newlines. For backwards compatibility with
    versions of simplejson earlier than 2.1.0, an integer is also accepted
    and is converted to a string with that many spaces.
    If ``separators`` is an ``(item_separator, dict_separator)`` tuple
    then it will be used instead of the default ``(', ', ': ')`` separators.
    ``(',', ':')`` is the most compact JSON representation.
    ``encoding`` is the character encoding for str instances, default is UTF-8.
    ``default(obj)`` is a function that should return a serializable version
    of obj or raise TypeError. The default simply raises TypeError.
    If *use_decimal* is true (default: ``False``) then decimal.Decimal
    will be natively serialized to JSON with full precision.
    To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
    ``.default()`` method to serialize additional types), specify it with
    the ``cls`` kwarg.
    """
    # cached encoder: only usable when every option matches the defaults
    # that _default_encoder was built with.  Bug fix: ``not use_decimal``
    # was missing here (it is checked in dumps()), so dump(obj, fp,
    # use_decimal=True) wrongly reused the non-decimal cached encoder and
    # raised TypeError for Decimal values.
    if (not skipkeys and ensure_ascii and
        check_circular and allow_nan and
        cls is None and indent is None and separators is None and
        encoding == 'utf-8' and default is None and not use_decimal
        and not kw):
        iterable = _default_encoder.iterencode(obj)
    else:
        if cls is None:
            cls = JSONEncoder
        iterable = cls(skipkeys=skipkeys, ensure_ascii=ensure_ascii,
            check_circular=check_circular, allow_nan=allow_nan, indent=indent,
            separators=separators, encoding=encoding,
            default=default, use_decimal=use_decimal, **kw).iterencode(obj)
    # could accelerate with writelines in some versions of Python, at
    # a debuggability cost
    for chunk in iterable:
        fp.write(chunk)
def dumps(obj, skipkeys=False, ensure_ascii=True, check_circular=True,
        allow_nan=True, cls=None, indent=None, separators=None,
        encoding='utf-8', default=None, use_decimal=False, **kw):
    """Serialize ``obj`` to a JSON formatted ``str``.

    If ``skipkeys`` is false then ``dict`` keys that are not basic types
    (``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
    will be skipped instead of raising a ``TypeError``.
    If ``ensure_ascii`` is false, then the return value will be a
    ``unicode`` instance subject to normal Python ``str`` to ``unicode``
    coercion rules instead of being escaped to an ASCII ``str``.
    If ``check_circular`` is false, then the circular reference check
    for container types will be skipped and a circular reference will
    result in an ``OverflowError`` (or worse).
    If ``allow_nan`` is false, then it will be a ``ValueError`` to
    serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``) in
    strict compliance of the JSON specification, instead of using the
    JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).
    If ``indent`` is a string, then JSON array elements and object members
    will be pretty-printed with a newline followed by that string repeated
    for each level of nesting. ``None`` (the default) selects the most
    compact representation without any newlines. For backwards compatibility
    with versions of simplejson earlier than 2.1.0, an integer is also
    accepted and is converted to a string with that many spaces.
    If ``separators`` is an ``(item_separator, dict_separator)`` tuple
    then it will be used instead of the default ``(', ', ': ')`` separators.
    ``(',', ':')`` is the most compact JSON representation.
    ``encoding`` is the character encoding for str instances, default is UTF-8.
    ``default(obj)`` is a function that should return a serializable version
    of obj or raise TypeError. The default simply raises TypeError.
    If *use_decimal* is true (default: ``False``) then decimal.Decimal
    will be natively serialized to JSON with full precision.
    To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
    ``.default()`` method to serialize additional types), specify it with
    the ``cls`` kwarg.
    """
    # Fast path: every option at its default lets us reuse the shared
    # module-level encoder instead of building one per call.
    all_defaults = (not skipkeys and ensure_ascii and check_circular
                    and allow_nan and cls is None and indent is None
                    and separators is None and encoding == 'utf-8'
                    and default is None and not use_decimal and not kw)
    if all_defaults:
        return _default_encoder.encode(obj)
    encoder_cls = cls if cls is not None else JSONEncoder
    encoder = encoder_cls(
        skipkeys=skipkeys, ensure_ascii=ensure_ascii,
        check_circular=check_circular, allow_nan=allow_nan, indent=indent,
        separators=separators, encoding=encoding, default=default,
        use_decimal=use_decimal, **kw)
    return encoder.encode(obj)
# Shared decoder reused by load()/loads() whenever every argument is at its
# default.
_default_decoder = JSONDecoder(encoding=None, object_hook=None,
                               object_pairs_hook=None)
def load(fp, encoding=None, cls=None, object_hook=None, parse_float=None,
        parse_int=None, parse_constant=None, object_pairs_hook=None,
        use_decimal=False, **kw):
    """Deserialize a JSON document read from the file-like object *fp*.

    Reads the whole of ``fp`` via ``fp.read()`` and delegates to
    :func:`loads`; every keyword argument keeps the meaning it has there:

    *encoding* - character encoding used for ``str`` input (``'utf-8'``
        by default); only ASCII-superset encodings work, other encodings
        must be passed in as ``unicode``.
    *object_hook* - called with each decoded JSON object (dict); its
        return value is used in place of the dict (e.g. for JSON-RPC
        class hinting).
    *object_pairs_hook* - called with an ordered list of (key, value)
        pairs for each object literal (e.g. ``collections.OrderedDict``
        remembers insertion order); takes priority over *object_hook*.
    *parse_float* / *parse_int* - replacements for ``float(num_str)``
        and ``int(num_str)`` (e.g. ``decimal.Decimal`` or ``float``).
    *parse_constant* - called with one of ``'-Infinity'``, ``'Infinity'``,
        ``'NaN'``; can be used to reject invalid JSON numbers.
    *use_decimal* - if true, implies ``parse_float=decimal.Decimal`` for
        parity with ``dump``.
    *cls* - a custom ``JSONDecoder`` subclass to use.
    """
    document = fp.read()
    return loads(document,
        encoding=encoding, cls=cls, object_hook=object_hook,
        object_pairs_hook=object_pairs_hook, parse_float=parse_float,
        parse_int=parse_int, parse_constant=parse_constant,
        use_decimal=use_decimal, **kw)
def loads(s, encoding=None, cls=None, object_hook=None, parse_float=None,
        parse_int=None, parse_constant=None, object_pairs_hook=None,
        use_decimal=False, **kw):
    """Deserialize the JSON document in *s* (``str`` or ``unicode``).

    *encoding* - character encoding used for ``str`` input (``'utf-8'``
        by default); only ASCII-superset encodings work, other encodings
        must be passed in as ``unicode``.
    *object_hook* - called with each decoded JSON object (dict); its
        return value is used in place of the dict (e.g. for JSON-RPC
        class hinting).
    *object_pairs_hook* - called with an ordered list of (key, value)
        pairs for each object literal (e.g. ``collections.OrderedDict``
        remembers insertion order); takes priority over *object_hook*.
    *parse_float* / *parse_int* - replacements for ``float(num_str)``
        and ``int(num_str)`` (e.g. ``decimal.Decimal`` or ``float``).
    *parse_constant* - called with one of ``'-Infinity'``, ``'Infinity'``,
        ``'NaN'``; can be used to reject invalid JSON numbers.
    *use_decimal* - if true, implies ``parse_float=decimal.Decimal`` for
        parity with ``dump``.
    *cls* - a custom ``JSONDecoder`` subclass to use.
    """
    # Everything at its default: reuse the shared cached decoder.
    all_defaults = (cls is None and encoding is None and object_hook is None
                    and parse_int is None and parse_float is None
                    and parse_constant is None and object_pairs_hook is None
                    and not use_decimal and not kw)
    if all_defaults:
        return _default_decoder.decode(s)
    if cls is None:
        cls = JSONDecoder
    # Fold only the explicitly supplied hooks into the decoder kwargs.
    for key, value in (('object_hook', object_hook),
                       ('object_pairs_hook', object_pairs_hook),
                       ('parse_float', parse_float),
                       ('parse_int', parse_int),
                       ('parse_constant', parse_constant)):
        if value is not None:
            kw[key] = value
    if use_decimal:
        if parse_float is not None:
            raise TypeError("use_decimal=True implies parse_float=Decimal")
        kw['parse_float'] = Decimal
    return cls(encoding=encoding, **kw).decode(s)
def _toggle_speedups(enabled):
    """Switch the package between the C-accelerated helpers from
    simplejson._speedups and the pure-Python implementations, then
    rebuild the cached default encoder/decoder so they pick up the
    change.  Intended for tests and benchmarking, not public API.
    """
    import simplejson.decoder as dec
    import simplejson.encoder as enc
    import simplejson.scanner as scan
    c_make_encoder = _import_c_make_encoder()
    if enabled:
        # Prefer the extension-module versions, falling back to the
        # pure-Python ones for any piece that is unavailable.
        dec.scanstring = dec.c_scanstring or dec.py_scanstring
        enc.c_make_encoder = c_make_encoder
        enc.encode_basestring_ascii = (enc.c_encode_basestring_ascii or
            enc.py_encode_basestring_ascii)
        scan.make_scanner = scan.c_make_scanner or scan.py_make_scanner
    else:
        # Force the pure-Python implementations.
        dec.scanstring = dec.py_scanstring
        enc.c_make_encoder = None
        enc.encode_basestring_ascii = enc.py_encode_basestring_ascii
        scan.make_scanner = scan.py_make_scanner
    # The decoder module keeps its own reference to make_scanner.
    dec.make_scanner = scan.make_scanner
    # Recreate the cached defaults so the fast paths in loads()/dumps()
    # use the newly selected implementations.
    global _default_decoder
    _default_decoder = JSONDecoder(
        encoding=None,
        object_hook=None,
        object_pairs_hook=None,
    )
    global _default_encoder
    _default_encoder = JSONEncoder(
        skipkeys=False,
        ensure_ascii=True,
        check_circular=True,
        allow_nan=True,
        indent=None,
        separators=None,
        encoding='utf-8',
        default=None,
    )
| Python |
r"""Command-line tool to validate and pretty-print JSON
Usage::
$ echo '{"json":"obj"}' | python -m simplejson.tool
{
"json": "obj"
}
$ echo '{ 1.2:3.4}' | python -m simplejson.tool
Expecting property name: line 1 column 2 (char 2)
"""
import sys
import simplejson as json
def main():
    """Validate and pretty-print JSON read from a file or stdin.

    Usage: tool.py [infile [outfile]] -- missing arguments default to
    stdin/stdout.  Exits via SystemExit with the decode error message
    when the input is not valid JSON.
    """
    if len(sys.argv) == 1:
        infile = sys.stdin
        outfile = sys.stdout
    elif len(sys.argv) == 2:
        infile = open(sys.argv[1], 'rb')
        outfile = sys.stdout
    elif len(sys.argv) == 3:
        infile = open(sys.argv[1], 'rb')
        outfile = open(sys.argv[2], 'wb')
    else:
        raise SystemExit(sys.argv[0] + " [infile [outfile]]")
    try:
        # Preserve key order and decimal precision so the output
        # round-trips the input faithfully.
        obj = json.load(infile,
                        object_pairs_hook=json.OrderedDict,
                        use_decimal=True)
    # 'except ... as ...' is valid on Python 2.6+ and Python 3, unlike
    # the old 'except ValueError, e' form which is Python-2-only.
    except ValueError as e:
        raise SystemExit(e)
    json.dump(obj, outfile, sort_keys=True, indent='    ', use_decimal=True)
    outfile.write('\n')

if __name__ == '__main__':
    main()
| Python |
"""JSON token scanner
"""
import re
def _import_c_make_scanner():
try:
from simplejson._speedups import make_scanner
return make_scanner
except ImportError:
return None
# C-accelerated scanner factory, or None when _speedups is unavailable.
c_make_scanner = _import_c_make_scanner()

__all__ = ['make_scanner']
# Regular expression for a JSON number: integer part plus optional
# fraction and optional exponent, captured as three separate groups.
NUMBER_RE = re.compile(
    r'(-?(?:0|[1-9]\d*))(\.\d+)?([eE][-+]?\d+)?',
    (re.VERBOSE | re.MULTILINE | re.DOTALL))


def py_make_scanner(context):
    """Build the pure-Python ``scan_once(string, idx)`` callable for the
    decoder *context*.

    The returned callable scans a single JSON value starting at ``idx``
    and returns ``(value, end_index)``; it raises ``StopIteration`` when
    ``idx`` is out of range or no JSON value starts there.
    """
    # Bind every context attribute to a local once so the scanning
    # closure avoids repeated attribute lookups.
    parse_object = context.parse_object
    parse_array = context.parse_array
    parse_string = context.parse_string
    match_number = NUMBER_RE.match
    encoding = context.encoding
    strict = context.strict
    parse_float = context.parse_float
    parse_int = context.parse_int
    parse_constant = context.parse_constant
    object_hook = context.object_hook
    object_pairs_hook = context.object_pairs_hook
    memo = context.memo

    def _scan_once(string, idx):
        try:
            nextchar = string[idx]
        except IndexError:
            raise StopIteration
        if nextchar == '"':
            return parse_string(string, idx + 1, encoding, strict)
        if nextchar == '{':
            return parse_object((string, idx + 1), encoding, strict,
                _scan_once, object_hook, object_pairs_hook, memo)
        if nextchar == '[':
            return parse_array((string, idx + 1), _scan_once)
        if nextchar == 'n' and string[idx:idx + 4] == 'null':
            return None, idx + 4
        if nextchar == 't' and string[idx:idx + 4] == 'true':
            return True, idx + 4
        if nextchar == 'f' and string[idx:idx + 5] == 'false':
            return False, idx + 5
        # Numbers are tried before the non-standard constants so that a
        # leading '-' is first interpreted as a negative number.
        number = match_number(string, idx)
        if number is not None:
            integer, frac, exp = number.groups()
            if frac or exp:
                value = parse_float(integer + (frac or '') + (exp or ''))
            else:
                value = parse_int(integer)
            return value, number.end()
        if nextchar == 'N' and string[idx:idx + 3] == 'NaN':
            return parse_constant('NaN'), idx + 3
        if nextchar == 'I' and string[idx:idx + 8] == 'Infinity':
            return parse_constant('Infinity'), idx + 8
        if nextchar == '-' and string[idx:idx + 9] == '-Infinity':
            return parse_constant('-Infinity'), idx + 9
        raise StopIteration

    def scan_once(string, idx):
        # memo is handed to parse_object during the scan and cleared
        # again after every top-level call.
        try:
            return _scan_once(string, idx)
        finally:
            memo.clear()

    return scan_once
# Prefer the C implementation when the _speedups extension is present.
make_scanner = c_make_scanner or py_make_scanner
| Python |
"""Drop-in replacement for collections.OrderedDict by Raymond Hettinger
http://code.activestate.com/recipes/576693/
"""
from UserDict import DictMixin
# Modified from original to support Python 2.4, see
# http://code.google.com/p/simplejson/issues/detail?id=53
try:
    # all() is a builtin from Python 2.5 onwards; probe for it and fall
    # back to a pure-Python definition on 2.4.
    all
except NameError:
    def all(seq):
        """Return True if every element of *seq* is true (2.4 fallback)."""
        for elem in seq:
            if not elem:
                return False
        return True
class OrderedDict(dict, DictMixin):
    """Dict subclass that remembers the order keys were first inserted.

    Order is tracked in a circular doubly linked list of
    [key, prev, next] cells anchored at a sentinel node (self.__end);
    self.__map gives O(1) access from a key to its list cell.
    """

    def __init__(self, *args, **kwds):
        # Same signature as dict(): at most one positional
        # mapping/iterable plus keyword pairs.
        if len(args) > 1:
            raise TypeError('expected at most 1 arguments, got %d' % len(args))
        try:
            self.__end
        except AttributeError:
            # First initialisation: build the sentinel and the key map.
            self.clear()
        self.update(*args, **kwds)

    def clear(self):
        """Remove all items and reset the order-tracking structures."""
        self.__end = end = []
        end += [None, end, end]         # sentinel node for doubly linked list
        self.__map = {}                 # key --> [key, prev, next]
        dict.clear(self)

    def __setitem__(self, key, value):
        # New keys are appended at the tail of the linked list; keys that
        # already exist keep their original position.
        if key not in self:
            end = self.__end
            curr = end[1]
            curr[2] = end[1] = self.__map[key] = [key, curr, end]
        dict.__setitem__(self, key, value)

    def __delitem__(self, key):
        dict.__delitem__(self, key)
        # Unlink the removed key's cell from the linked list.
        key, prev, next = self.__map.pop(key)
        prev[2] = next
        next[1] = prev

    def __iter__(self):
        # Walk the linked list forwards from the sentinel.
        end = self.__end
        curr = end[2]
        while curr is not end:
            yield curr[0]
            curr = curr[2]

    def __reversed__(self):
        # Walk the linked list backwards from the sentinel.
        end = self.__end
        curr = end[1]
        while curr is not end:
            yield curr[0]
            curr = curr[1]

    def popitem(self, last=True):
        """Remove and return a (key, value) pair; LIFO if *last* is true."""
        if not self:
            raise KeyError('dictionary is empty')
        # Modified from original to support Python 2.4, see
        # http://code.google.com/p/simplejson/issues/detail?id=53
        if last:
            key = reversed(self).next()
        else:
            key = iter(self).next()
        value = self.pop(key)
        return key, value

    def __reduce__(self):
        """Support pickling; the linked list is rebuilt from the items."""
        items = [[k, self[k]] for k in self]
        # Temporarily drop the self-referential linked-list state so
        # vars(self) only contains picklable user attributes.
        tmp = self.__map, self.__end
        del self.__map, self.__end
        inst_dict = vars(self).copy()
        self.__map, self.__end = tmp
        if inst_dict:
            return (self.__class__, (items,), inst_dict)
        return self.__class__, (items,)

    def keys(self):
        return list(self)

    # DictMixin derives the rest of the mapping API from the methods above.
    setdefault = DictMixin.setdefault
    update = DictMixin.update
    pop = DictMixin.pop
    values = DictMixin.values
    items = DictMixin.items
    iterkeys = DictMixin.iterkeys
    itervalues = DictMixin.itervalues
    iteritems = DictMixin.iteritems

    def __repr__(self):
        if not self:
            return '%s()' % (self.__class__.__name__,)
        return '%s(%r)' % (self.__class__.__name__, self.items())

    def copy(self):
        return self.__class__(self)

    @classmethod
    def fromkeys(cls, iterable, value=None):
        """Alternate constructor: every key of *iterable* maps to *value*."""
        d = cls()
        for key in iterable:
            d[key] = value
        return d

    def __eq__(self, other):
        # Comparison with another OrderedDict is order-sensitive;
        # comparison with any other mapping is order-insensitive.
        if isinstance(other, OrderedDict):
            return len(self)==len(other) and \
                   all(p==q for p, q in zip(self.items(), other.items()))
        return dict.__eq__(self, other)

    def __ne__(self, other):
        return not self == other
| Python |
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes to encapsulate a single HTTP request.
The classes implement a command pattern, with every
object supporting an execute() method that does the
actual HTTP request.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
# NOTE: the original list was missing the comma after 'HttpMock', so
# adjacent string literals concatenated into 'HttpMockset_user_agent'
# and neither name was actually exported.
__all__ = [
    'HttpRequest', 'RequestMockBuilder', 'HttpMock',
    'set_user_agent', 'tunnel_patch'
    ]
import httplib2
import logging
import os

from anyjson import simplejson
from errors import HttpError
from model import JsonModel
class HttpRequest(object):
  """A single HTTP request bound to a transport and a response postprocessor.
  """

  def __init__(self, http, postproc, uri,
               method='GET',
               body=None,
               headers=None,
               methodId=None):
    """Create an HttpRequest.

    Args:
      http: httplib2.Http, the transport used to perform the request.
      postproc: callable(resp, content), turns the raw response into the
          value returned by execute(), or raises on an error.
      uri: string, the absolute URI to send the request to.
      method: string, the HTTP verb.
      body: string, the request payload.
      headers: dict, the request headers (defaults to an empty dict).
      methodId: string, unique identifier for the API method being
          called (accepted for interface compatibility; not stored).
    """
    self.http = http
    self.postproc = postproc
    self.uri = uri
    self.method = method
    self.body = body
    self.headers = headers or {}

  def execute(self, http=None):
    """Perform the request and postprocess the response.

    Args:
      http: httplib2.Http, optional transport that overrides the one the
          request was constructed with.

    Returns:
      Whatever self.postproc produces from the response.

    Raises:
      apiclient.errors.HttpError: if the response status is 300 or greater.
      httplib2.Error: if a transport error occurs.
    """
    transport = http if http is not None else self.http
    response, content = transport.request(self.uri, self.method,
                                          body=self.body,
                                          headers=self.headers)
    if response.status >= 300:
      raise HttpError(response, content, self.uri)
    return self.postproc(response, content)
class HttpRequestMock(object):
  """Mock of HttpRequest that replays a canned response.

  Do not construct directly; use RequestMockBuilder.
  """

  def __init__(self, resp, content, postproc):
    """Create the mock.

    Args:
      resp: httplib2.Response, the response to emulate; None produces a
          generated 200 OK response.
      content: string, the canned response body.
      postproc: callable, post-processing function, usually the model
          class's response handler (see model.JsonModel.response()).
    """
    self.content = content
    self.postproc = postproc
    if resp is None:
      self.resp = httplib2.Response({'status': 200, 'reason': 'OK'})
    else:
      self.resp = resp
    # Mirror the mapping's 'reason' entry as an attribute, matching how
    # httplib2.Response exposes it.
    if 'reason' in self.resp:
      self.resp.reason = self.resp['reason']

  def execute(self, http=None):
    """Return the canned response through the post-processor.

    Same interface as HttpRequest.execute(); no real HTTP traffic occurs.
    """
    return self.postproc(self.resp, self.content)
class RequestMockBuilder(object):
  """Callable factory of HttpRequestMock objects, for testing.

  Construct it with a dictionary mapping request methodIds to
  (httplib2.Response, content) tuples; those are returned when the
  corresponding method is built. A None response stands for a generated
  200 OK. MethodIds with no entry get a 200 OK whose body is the empty
  JSON object, post-processed by a JsonModel.

  Example:
    response = '{"data": {"id": "tag:google.c...'
    requestBuilder = RequestMockBuilder(
      {
        'chili.activities.get': (None, response),
      }
    )
    apiclient.discovery.build("buzz", "v1", requestBuilder=requestBuilder)

  The methodId comes from the rpcName in the discovery document.
  For more details see the project wiki.
  """

  def __init__(self, responses):
    """Create the builder.

    responses - dict of methodId -> (httplib2.Response, content), where
        the methodId comes from the 'rpcName' field in the discovery
        document.
    """
    self.responses = responses

  def __call__(self, http, postproc, uri, method='GET', body=None,
               headers=None, methodId=None):
    """Build an HttpRequest-compatible mock.

    Implements the callable interface discovery.build() expects of
    requestBuilder; see HttpRequest.execute() for the parameter and
    response semantics.
    """
    if methodId in self.responses:
      resp, content = self.responses[methodId]
      return HttpRequestMock(resp, content, postproc)
    # No canned response registered: default to an empty JSON body.
    model = JsonModel(False)
    return HttpRequestMock(None, '{}', model.response)
class HttpMock(object):
  """Mock of httplib2.Http that replays a canned response body from a file."""

  def __init__(self, filename, headers=None):
    """Create the mock.

    Args:
      filename: string, absolute filename to read the response body from.
      headers: dict, headers to return with the response; defaults to a
          '200 OK' status line.
    """
    if headers is None:
      headers = {'status': '200 OK'}
    # Use open() -- the file() builtin is Python-2-only -- and guarantee
    # the handle is closed even if read() raises.
    f = open(filename, 'r')
    try:
      self.data = f.read()
    finally:
      f.close()
    self.headers = headers

  def request(self, uri,
              method='GET',
              body=None,
              headers=None,
              redirections=1,
              connection_type=None):
    """Return the canned (response, data) pair; the arguments are ignored."""
    return httplib2.Response(self.headers), self.data
class HttpMockSequence(object):
  """Mock of httplib2.Http that replays a scripted sequence of responses.

  Each call to request() consumes the next (headers, body) pair supplied
  at construction time:

    http = HttpMockSequence([
      ({'status': '401'}, ''),
      ({'status': '200'}, '{"access_token":"1/3w","expires_in":3600}'),
      ({'status': '200'}, 'echo_request_headers'),
      ])
    resp, content = http.request("http://examples.com")

  Special body values trigger test-friendly behaviour:
    'echo_request_headers'         - reply with the request headers
    'echo_request_headers_as_json' - reply with the headers serialized
                                     as JSON
    'echo_request_body'            - reply with the request body
  """

  def __init__(self, iterable):
    """Create the mock.

    Args:
      iterable: iterable, a sequence of (headers, body) pairs consumed
          in order, one per request() call.
    """
    self._iterable = iterable

  def request(self, uri,
              method='GET',
              body=None,
              headers=None,
              redirections=1,
              connection_type=None):
    """Pop and return the next scripted response."""
    resp, content = self._iterable.pop(0)
    if content == 'echo_request_headers':
      content = headers
    elif content == 'echo_request_headers_as_json':
      content = simplejson.dumps(headers)
    elif content == 'echo_request_body':
      content = body
    return httplib2.Response(resp), content
def set_user_agent(http, user_agent):
  """Arrange for *user_agent* to be sent on every request through *http*.

  Args:
    http: an instance of httplib2.Http, or something that acts like it.
    user_agent: string, the value for the user-agent header.

  Returns:
    The same http object, with its request() method wrapped.

  Example:
    h = httplib2.Http()
    h = set_user_agent(h, "my-app-name/6.0")

  Auth usually sets the user-agent already; this helper covers the rare
  unauthenticated-endpoint case.
  """
  request_orig = http.request

  # Replacement for 'httplib2.Http.request' that injects the header.
  def wrapped_request(uri, method='GET', body=None, headers=None,
                      redirections=httplib2.DEFAULT_MAX_REDIRECTS,
                      connection_type=None):
    """Add (or prepend to) the user-agent header, then delegate."""
    if headers is None:
      headers = {}
    if 'user-agent' in headers:
      headers['user-agent'] = user_agent + ' ' + headers['user-agent']
    else:
      headers['user-agent'] = user_agent
    return request_orig(uri, method, body, headers,
                        redirections, connection_type)

  http.request = wrapped_request
  return http
def tunnel_patch(http):
  """Tunnel PATCH requests over POST.

  Args:
    http: an instance of httplib2.Http, or something that acts like it.

  Returns:
    The same http object, with its request() method wrapped.

  Example:
    h = httplib2.Http()
    h = tunnel_patch(h)

  Useful if you are running on a platform that doesn't support PATCH.
  Apply this last if you are using OAuth 1.0, as changing the method
  will result in a different signature.
  """
  request_orig = http.request

  # Replacement for 'httplib2.Http.request' that rewrites the method.
  def new_request(uri, method='GET', body=None, headers=None,
                  redirections=httplib2.DEFAULT_MAX_REDIRECTS,
                  connection_type=None):
    """Tunnel a PATCH request as POST with an override header."""
    if headers is None:
      headers = {}
    if method == 'PATCH':
      # An OAuth 1.0 signature computed before this rewrite covers the
      # original method, so it will no longer match; warn the caller.
      if 'oauth_token' in headers.get('authorization', ''):
        logging.warning(
            'OAuth 1.0 request made with Credentials after tunnel_patch.')
      headers['x-http-method-override'] = "PATCH"
      method = 'POST'
    resp, content = request_orig(uri, method, body, headers,
                                 redirections, connection_type)
    return resp, content

  http.request = new_request
  return http
| Python |
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for OAuth.
Utilities for making it easier to work with OAuth.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import copy
import httplib2
import logging
import oauth2 as oauth
import urllib
import urlparse
from anyjson import simplejson
try:
from urlparse import parse_qsl
except ImportError:
from cgi import parse_qsl
class Error(Exception):
  """Base error for this module."""
  pass


class RequestError(Error):
  """Error occurred during request."""
  pass


class MissingParameter(Error):
  """A parameter the discovery document marks as required was not supplied."""
  pass


class CredentialsInvalidError(Error):
  """The stored credentials are no longer valid (e.g. the server returned 401)."""
  pass
def _abstract():
  # Called by abstract base-class methods below to signal that a
  # subclass must provide the implementation.
  raise NotImplementedError('You need to override this function')
def _oauth_uri(name, discovery, params):
"""Look up the OAuth URI from the discovery
document and add query parameters based on
params.
name - The name of the OAuth URI to lookup, one
of 'request', 'access', or 'authorize'.
discovery - Portion of discovery document the describes
the OAuth endpoints.
params - Dictionary that is used to form the query parameters
for the specified URI.
"""
if name not in ['request', 'access', 'authorize']:
raise KeyError(name)
keys = discovery[name]['parameters'].keys()
query = {}
for key in keys:
if key in params:
query[key] = params[key]
return discovery[name]['url'] + '?' + urllib.urlencode(query)
class Credentials(object):
  """Abstract base class for all Credentials objects.

  Concrete subclasses must define an authorize() method that applies
  the credentials to an HTTP transport.
  """

  def authorize(self, http):
    """Authorize an httplib2.Http instance (or equivalent) with these
    credentials, usually by replacing http.request() with a wrapper
    that adds the appropriate headers before delegating to the
    original Http.request() method.
    """
    _abstract()
class Flow(object):
  """Base class for all Flow objects."""
  # Marker base class; FlowThreeLegged below is the OAuth 1.0a flow.
  pass
class Storage(object):
  """Base class for all Storage objects.

  Store and retrieve a single credential.
  """

  def get(self):
    """Retrieve credential.

    Returns:
      apiclient.oauth.Credentials

    Abstract: subclasses must override (raises NotImplementedError here).
    """
    _abstract()

  def put(self, credentials):
    """Write a credential.

    Args:
      credentials: Credentials, the credentials to store.

    Abstract: subclasses must override (raises NotImplementedError here).
    """
    _abstract()
class OAuthCredentials(Credentials):
  """Credentials object for OAuth 1.0a."""

  def __init__(self, consumer, token, user_agent):
    """
    consumer - An instance of oauth.Consumer.
    token - An instance of oauth.Token constructed with
        the access token and secret.
    user_agent - The HTTP User-Agent to provide for this application.
    """
    self.consumer = consumer
    self.token = token
    self.user_agent = user_agent
    self.store = None
    # True if the credentials have been revoked
    self._invalid = False

  @property
  def invalid(self):
    """True if the credentials are invalid, such as being revoked."""
    # getattr with a default keeps this working for instances unpickled
    # from versions of the class that did not have _invalid.
    return getattr(self, "_invalid", False)

  def set_store(self, store):
    """Set the storage for the credential.

    Args:
      store: callable, a callable that when passed a Credential
        will store the credential back to where it came from.
        This is needed to store the latest access_token if it
        has been revoked.
    """
    self.store = store

  def __getstate__(self):
    """Trim the state down to something that can be pickled."""
    # The store callable is not picklable; it is re-attached via
    # set_store() after unpickling.
    d = copy.copy(self.__dict__)
    del d['store']
    return d

  def __setstate__(self, state):
    """Reconstitute the state of the object from being pickled."""
    self.__dict__.update(state)
    self.store = None

  def authorize(self, http):
    """
    Args:
      http - An instance of httplib2.Http
          or something that acts like it.

    Returns:
      A modified instance of http that was passed in.

    Example:
      h = httplib2.Http()
      h = credentials.authorize(h)

    You can't create a new OAuth subclass of httplib2.Authenication
    because it never gets passed the absolute URI, which is needed for
    signing. So instead we have to overload 'request' with a closure
    that adds in the Authorization header and then calls the original
    version of 'request()'.
    """
    request_orig = http.request
    signer = oauth.SignatureMethod_HMAC_SHA1()

    # The closure that will replace 'httplib2.Http.request'.
    def new_request(uri, method='GET', body=None, headers=None,
                    redirections=httplib2.DEFAULT_MAX_REDIRECTS,
                    connection_type=None):
      """Modify the request headers to add the appropriate
      Authorization header."""
      # Follow 301/302 redirects manually so each hop is re-signed for
      # its own URI (the OAuth 1.0 signature covers the request URI).
      response_code = 302
      http.follow_redirects = False
      while response_code in [301, 302]:
        req = oauth.Request.from_consumer_and_token(
            self.consumer, self.token, http_method=method, http_url=uri)
        req.sign_request(signer, self.consumer, self.token)
        if headers is None:
          headers = {}
        headers.update(req.to_header())
        if 'user-agent' in headers:
          headers['user-agent'] = self.user_agent + ' ' + headers['user-agent']
        else:
          headers['user-agent'] = self.user_agent
        resp, content = request_orig(uri, method, body, headers,
                                     redirections, connection_type)
        response_code = resp.status
        if response_code in [301, 302]:
          uri = resp['location']
      # Update the stored credential if it becomes invalid.
      if response_code == 401:
        logging.info('Access token no longer valid: %s' % content)
        self._invalid = True
        if self.store is not None:
          self.store(self)
        raise CredentialsInvalidError("Credentials are no longer valid.")
      return resp, content

    http.request = new_request
    return http
class FlowThreeLegged(Flow):
  """Does the Three Legged Dance for OAuth 1.0a."""

  def __init__(self, discovery, consumer_key, consumer_secret, user_agent,
               **kwargs):
    """
    discovery - Section of the API discovery document that describes
        the OAuth endpoints.
    consumer_key - OAuth consumer key
    consumer_secret - OAuth consumer secret
    user_agent - The HTTP User-Agent that identifies the application.
    **kwargs - The keyword arguments are all optional and required
        parameters for the OAuth calls.

    Raises MissingParameter if a parameter the discovery document marks
    as required is not supplied in **kwargs.
    """
    self.discovery = discovery
    self.consumer_key = consumer_key
    self.consumer_secret = consumer_secret
    self.user_agent = user_agent
    self.params = kwargs
    self.request_token = {}
    # Collect every non-oauth_* parameter the discovery document marks
    # as required, then fail fast on anything the caller omitted.
    required = {}
    for uriinfo in discovery.itervalues():
      for name, value in uriinfo['parameters'].iteritems():
        if value['required'] and not name.startswith('oauth_'):
          required[name] = 1
    for key in required.iterkeys():
      if key not in self.params:
        raise MissingParameter('Required parameter %s not supplied' % key)

  def step1_get_authorize_url(self, oauth_callback='oob'):
    """Returns a URI to redirect to the provider.

    oauth_callback - Either the string 'oob' for a non-web-based
        application, or a URI that handles the callback from the
        authorization server.

    If oauth_callback is 'oob' then pass in the
    generated verification code to step2_exchange,
    otherwise pass in the query parameters received
    at the callback uri to step2_exchange.
    """
    consumer = oauth.Consumer(self.consumer_key, self.consumer_secret)
    client = oauth.Client(consumer)
    headers = {
        'user-agent': self.user_agent,
        'content-type': 'application/x-www-form-urlencoded'
    }
    body = urllib.urlencode({'oauth_callback': oauth_callback})
    uri = _oauth_uri('request', self.discovery, self.params)
    # Fetch an unauthorized request token from the provider.
    resp, content = client.request(uri, 'POST', headers=headers,
                                   body=body)
    if resp['status'] != '200':
      logging.error('Failed to retrieve temporary authorization: %s', content)
      raise RequestError('Invalid response %s.' % resp['status'])
    # Keep the request token around for step2_exchange().
    self.request_token = dict(parse_qsl(content))
    auth_params = copy.copy(self.params)
    auth_params['oauth_token'] = self.request_token['oauth_token']
    return _oauth_uri('authorize', self.discovery, auth_params)

  def step2_exchange(self, verifier):
    """Exchanges an authorized request token for OAuthCredentials.

    Args:
      verifier: string, dict - either the verifier token, or a dictionary
          of the query parameters to the callback, which contains
          the oauth_verifier.
    Returns:
      The Credentials object.
    """
    if not (isinstance(verifier, str) or isinstance(verifier, unicode)):
      # A dict of callback query parameters was passed in.
      verifier = verifier['oauth_verifier']
    token = oauth.Token(
        self.request_token['oauth_token'],
        self.request_token['oauth_token_secret'])
    token.set_verifier(verifier)
    consumer = oauth.Consumer(self.consumer_key, self.consumer_secret)
    client = oauth.Client(consumer, token)
    headers = {
        'user-agent': self.user_agent,
        'content-type': 'application/x-www-form-urlencoded'
    }
    uri = _oauth_uri('access', self.discovery, self.params)
    # Trade the authorized request token for an access token.
    resp, content = client.request(uri, 'POST', headers=headers)
    if resp['status'] != '200':
      logging.error('Failed to retrieve access token: %s', content)
      raise RequestError('Invalid response %s.' % resp['status'])
    oauth_params = dict(parse_qsl(content))
    token = oauth.Token(
        oauth_params['oauth_token'],
        oauth_params['oauth_token_secret'])
    return OAuthCredentials(consumer, token, self.user_agent)
| Python |
#!/usr/bin/python2.4
#
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model objects for requests and responses.
Each API may support one or more serializations, such
as JSON, Atom, etc. The model classes are responsible
for converting between the wire format and the Python
object representation.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import gflags
import logging
import urllib
from anyjson import simplejson
from errors import HttpError
FLAGS = gflags.FLAGS

# When set, model classes log the full request details (headers, path
# parameters, body, query) -- see BaseModel._log_request below.
gflags.DEFINE_boolean('dump_request_response', False,
                      'Dump all http server requests and responses. '
                     )
def _abstract():
  # Called by the abstract Model base-class methods to signal that a
  # subclass must provide the implementation.
  raise NotImplementedError('You need to override this function')
class Model(object):
  """Model base class.

  All Model classes should implement this interface.
  The Model serializes and de-serializes between a wire
  format such as JSON and a Python object representation.
  """

  def request(self, headers, path_params, query_params, body_value):
    """Updates outgoing requests with a serialized body.

    Args:
      headers: dict, request headers
      path_params: dict, parameters that appear in the request path
      query_params: dict, parameters that appear in the query
      body_value: object, the request body as a Python object, which must be
                  serializable.
    Returns:
      A tuple of (headers, path_params, query, body)
      headers: dict, request headers
      path_params: dict, parameters that appear in the request path
      query: string, query part of the request URI
      body: string, the body serialized in the desired wire format.
    """
    # Abstract: raises NotImplementedError until overridden.
    _abstract()

  def response(self, resp, content):
    """Convert the response wire format into a Python object.

    Args:
      resp: httplib2.Response, the HTTP response headers and status
      content: string, the body of the HTTP response
    Returns:
      The body de-serialized as a Python object.
    Raises:
      apiclient.errors.HttpError if a non 2xx response is received.
    """
    # Abstract: raises NotImplementedError until overridden.
    _abstract()
class BaseModel(Model):
  """Base model class.

  Subclasses should provide implementations for the "serialize" and
  "deserialize" methods, as well as values for the following class attributes.

  Attributes:
    accept: The value to use for the HTTP Accept header.
    content_type: The value to use for the HTTP Content-type header.
    no_content_response: The value to return when deserializing a 204 "No
        Content" response.
    alt_param: The value to supply as the "alt" query parameter for requests.
  """
  accept = None
  content_type = None
  no_content_response = None
  alt_param = None

  def _log_request(self, headers, path_params, query, body):
    """Logs debugging information about the request if requested."""
    if FLAGS.dump_request_response:
      logging.info('--request-start--')
      logging.info('-headers-start-')
      for h, v in headers.iteritems():
        logging.info('%s: %s', h, v)
      logging.info('-headers-end-')
      logging.info('-path-parameters-start-')
      for h, v in path_params.iteritems():
        logging.info('%s: %s', h, v)
      logging.info('-path-parameters-end-')
      logging.info('body: %s', body)
      logging.info('query: %s', query)
      logging.info('--request-end--')

  def request(self, headers, path_params, query_params, body_value):
    """Updates outgoing requests with a serialized body.

    Args:
      headers: dict, request headers
      path_params: dict, parameters that appear in the request path
      query_params: dict, parameters that appear in the query
      body_value: object, the request body as a Python object, which must be
          serializable.

    Returns:
      A tuple of (headers, path_params, query, body)
      headers: dict, request headers
      path_params: dict, parameters that appear in the request path
      query: string, query part of the request URI
      body: string, the body serialized in the desired wire format.
    """
    query = self._build_query(query_params)
    headers['accept'] = self.accept
    headers['accept-encoding'] = 'gzip, deflate'
    # Append our library identifier to any caller-supplied user-agent.
    if 'user-agent' in headers:
      headers['user-agent'] += ' '
    else:
      headers['user-agent'] = ''
    headers['user-agent'] += 'google-api-python-client/1.0'
    if body_value is not None:
      headers['content-type'] = self.content_type
      body_value = self.serialize(body_value)
    self._log_request(headers, path_params, query, body_value)
    return (headers, path_params, query, body_value)

  def _build_query(self, params):
    """Builds a query string.

    Args:
      params: dict, the query parameters

    Returns:
      The query parameters properly encoded into an HTTP URI query string.
    """
    params.update({'alt': self.alt_param})
    astuples = []
    for key, value in params.iteritems():
      if type(value) == type([]):
        for x in value:
          # Only encode string-like members; the previous unconditional
          # x.encode('utf-8') raised AttributeError for non-string values
          # in a repeated parameter.  Mirrors the scalar branch below.
          if getattr(x, 'encode', False) and callable(x.encode):
            x = x.encode('utf-8')
          astuples.append((key, x))
      else:
        if getattr(value, 'encode', False) and callable(value.encode):
          value = value.encode('utf-8')
        astuples.append((key, value))
    return '?' + urllib.urlencode(astuples)

  def _log_response(self, resp, content):
    """Logs debugging information about the response if requested."""
    if FLAGS.dump_request_response:
      logging.info('--response-start--')
      for h, v in resp.iteritems():
        logging.info('%s: %s', h, v)
      if content:
        logging.info(content)
      logging.info('--response-end--')

  def response(self, resp, content):
    """Convert the response wire format into a Python object.

    Args:
      resp: httplib2.Response, the HTTP response headers and status
      content: string, the body of the HTTP response

    Returns:
      The body de-serialized as a Python object.

    Raises:
      apiclient.errors.HttpError if a non 2xx response is received.
    """
    self._log_response(resp, content)
    # Error handling is TBD, for example, do we retry
    # for some operation/error combinations?
    if resp.status < 300:
      if resp.status == 204:
        # A 204: No Content response should be treated differently
        # to all the other success states
        return self.no_content_response
      return self.deserialize(content)
    else:
      logging.debug('Content from bad request was: %s' % content)
      raise HttpError(resp, content)

  def serialize(self, body_value):
    """Perform the actual Python object serialization.

    Args:
      body_value: object, the request body as a Python object.

    Returns:
      string, the body in serialized form.
    """
    # Abstract: subclasses define the wire format.
    _abstract()

  def deserialize(self, content):
    """Perform the actual deserialization from response string to Python
    object.

    Args:
      content: string, the body of the HTTP response

    Returns:
      The body de-serialized as a Python object.
    """
    # Abstract: subclasses define the wire format.
    _abstract()
class JsonModel(BaseModel):
  """Model class for JSON.

  Serializes and de-serializes between JSON and the Python
  object representation of HTTP request and response bodies.
  """
  accept = 'application/json'
  content_type = 'application/json'
  alt_param = 'json'

  def __init__(self, data_wrapper=False):
    """Construct a JsonModel.

    Args:
      data_wrapper: boolean, wrap requests and responses in a data wrapper
    """
    self._data_wrapper = data_wrapper

  def serialize(self, body_value):
    """Serialize a Python object to JSON, applying the data wrapper."""
    needs_wrapper = (self._data_wrapper and isinstance(body_value, dict)
                     and 'data' not in body_value)
    if needs_wrapper:
      body_value = {'data': body_value}
    return simplejson.dumps(body_value)

  def deserialize(self, content):
    """Parse JSON, unwrapping a top-level 'data' envelope when present."""
    parsed = simplejson.loads(content)
    if isinstance(parsed, dict) and 'data' in parsed:
      return parsed['data']
    return parsed

  @property
  def no_content_response(self):
    """An empty dict stands in for a 204 No Content body."""
    return {}
class ProtocolBufferModel(BaseModel):
  """Model class for protocol buffers.

  Serializes and de-serializes the binary protocol buffer sent in the HTTP
  request and response bodies.
  """
  accept = 'application/x-protobuf'
  content_type = 'application/x-protobuf'
  alt_param = 'proto'

  def __init__(self, protocol_buffer):
    """Constructs a ProtocolBufferModel.

    The serialized protocol buffer returned in an HTTP response will be
    de-serialized using the given protocol buffer class.

    Args:
      protocol_buffer: The protocol buffer class used to de-serialize a
          response from the API.
    """
    self._protocol_buffer = protocol_buffer

  def serialize(self, body_value):
    """Return the binary wire encoding of a protocol buffer message."""
    return body_value.SerializeToString()

  def deserialize(self, content):
    """Parse binary content into an instance of the configured class."""
    return self._protocol_buffer.FromString(content)

  @property
  def no_content_response(self):
    """A default-constructed (empty) message stands in for a 204 body."""
    return self._protocol_buffer()
| Python |
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Client for discovery based APIs
A client library for Google's discovery based APIs.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
__all__ = [
'build', 'build_from_document'
]
import httplib2
import logging
import os
import re
import uritemplate
import urllib
import urlparse
try:
from urlparse import parse_qsl
except ImportError:
from cgi import parse_qsl
from http import HttpRequest
from anyjson import simplejson
from model import JsonModel
from errors import UnknownLinkType
from errors import HttpError
from errors import InvalidJsonError
# Matches a whole URI Template expression, e.g. '{api}' inside a path.
URITEMPLATE = re.compile('{[^}]*}')
# Matches a variable name inside a URI Template expression.
VARNAME = re.compile('[a-zA-Z0-9_-]+')
# URI Template for the location of a service's discovery document.
DISCOVERY_URI = ('https://www.googleapis.com/discovery/v1/apis/'
                 '{api}/{apiVersion}/rest')
# Fallback docstring for generated methods lacking a 'description' field.
DEFAULT_METHOD_DOC = 'A description of how to use this function'
# Query parameters that work, but don't appear in discovery
STACK_QUERY_PARAMETERS = ['trace', 'fields', 'pp', 'prettyPrint', 'userIp',
                          'userip', 'strict']
def key2param(key):
  """Converts key names into parameter names.

  For example, converting "max-results" -> "max_results"
  """
  chars = list(key)
  # Parameter names must start with a letter; prefix 'x' when they don't.
  prefix = '' if chars[0].isalpha() else 'x'
  # Every non-alphanumeric character becomes an underscore.
  return prefix + ''.join(c if c.isalnum() else '_' for c in chars)
def build(serviceName, version,
http=None,
discoveryServiceUrl=DISCOVERY_URI,
developerKey=None,
model=None,
requestBuilder=HttpRequest):
"""Construct a Resource for interacting with an API.
Construct a Resource object for interacting with
an API. The serviceName and version are the
names from the Discovery service.
Args:
serviceName: string, name of the service
version: string, the version of the service
discoveryServiceUrl: string, a URI Template that points to
the location of the discovery service. It should have two
parameters {api} and {apiVersion} that when filled in
produce an absolute URI to the discovery document for
that service.
developerKey: string, key obtained
from https://code.google.com/apis/console
model: apiclient.Model, converts to and from the wire format
requestBuilder: apiclient.http.HttpRequest, encapsulator for
an HTTP request
Returns:
A Resource object with methods for interacting with
the service.
"""
params = {
'api': serviceName,
'apiVersion': version
}
if http is None:
http = httplib2.Http()
requested_url = uritemplate.expand(discoveryServiceUrl, params)
logging.info('URL being requested: %s' % requested_url)
resp, content = http.request(requested_url)
if resp.status > 400:
raise HttpError(resp, content, requested_url)
try:
service = simplejson.loads(content)
except ValueError, e:
logging.error('Failed to parse as JSON: ' + content)
raise InvalidJsonError()
fn = os.path.join(os.path.dirname(__file__), 'contrib',
serviceName, 'future.json')
try:
f = file(fn, 'r')
future = f.read()
f.close()
except IOError:
future = None
return build_from_document(content, discoveryServiceUrl, future,
http, developerKey, model, requestBuilder)
def build_from_document(
    service,
    base,
    future=None,
    http=None,
    developerKey=None,
    model=None,
    requestBuilder=HttpRequest):
  """Create a Resource for interacting with an API.

  Same as `build()`, but constructs the Resource object from a discovery
  document that it is given, as opposed to retrieving one over HTTP.

  Args:
    service: string, discovery document
    base: string, base URI for all HTTP requests, usually the discovery URI
    future: string, discovery document with future capabilities
    http: httplib2.Http, An instance of httplib2.Http or something that acts
      like it that HTTP requests will be made through.
    developerKey: string, Key for controlling API usage, generated
      from the API Console.
    model: Model class instance that serializes and
      de-serializes requests and responses.
    requestBuilder: Takes an http request and packages it up to be executed.

  Returns:
    A Resource object with methods for interacting with
    the service.
  """
  service = simplejson.loads(service)
  base = urlparse.urljoin(base, service['basePath'])
  future = simplejson.loads(future) if future else {}
  auth_discovery = future.get('auth', {})
  if model is None:
    # Services advertising 'dataWrapper' expect the {'data': ...} envelope.
    model = JsonModel('dataWrapper' in service.get('features', []))
  resource = createResource(http, base, model, requestBuilder, developerKey,
                            service, future)

  def auth_method():
    """Discovery information about the authentication the API uses."""
    return auth_discovery

  resource.auth_discovery = auth_method
  return resource
def _cast(value, schema_type):
"""Convert value to a string based on JSON Schema type.
See http://tools.ietf.org/html/draft-zyp-json-schema-03 for more details on
JSON Schema.
Args:
value: any, the value to convert
schema_type: string, the type that value should be interpreted as
Returns:
A string representation of 'value' based on the schema_type.
"""
if schema_type == 'string':
if type(value) == type('') or type(value) == type(u''):
return value
else:
return str(value)
elif schema_type == 'integer':
return str(int(value))
elif schema_type == 'number':
return str(float(value))
elif schema_type == 'boolean':
return str(bool(value)).lower()
else:
if type(value) == type('') or type(value) == type(u''):
return value
else:
return str(value)
def createResource(http, baseUrl, model, requestBuilder,
                   developerKey, resourceDesc, futureDesc):
  """Build a Resource instance whose methods are generated from a discovery
  document.

  Args:
    http: httplib2.Http-like object used to make requests.
    baseUrl: string, base URI that method paths are joined onto.
    model: Model instance, serializes requests / deserializes responses.
    requestBuilder: callable, packages up an HTTP request (HttpRequest).
    developerKey: string or None, API key appended as the 'key' query param.
    resourceDesc: dict, the (sub-)resource section of the discovery document.
    futureDesc: dict or None, matching section of the 'future' document.

  Returns:
    An instance of a dynamically-built Resource class.
  """

  class Resource(object):
    """A class for interacting with a resource."""

    def __init__(self):
      self._http = http
      self._baseUrl = baseUrl
      self._model = model
      self._developerKey = developerKey
      self._requestBuilder = requestBuilder

  def createMethod(theclass, methodName, methodDesc, futureDesc):
    # Builds one API method from its discovery description and attaches it
    # to 'theclass' under 'methodName'.
    pathUrl = methodDesc['path']
    httpMethod = methodDesc['httpMethod']
    methodId = methodDesc['id']
    # Undocumented-but-working query parameters are accepted everywhere.
    if 'parameters' not in methodDesc:
      methodDesc['parameters'] = {}
    for name in STACK_QUERY_PARAMETERS:
      methodDesc['parameters'][name] = {
          'type': 'string',
          'location': 'query'
          }
    # Methods with a request body get a synthetic required 'body' parameter.
    if httpMethod in ['PUT', 'POST', 'PATCH']:
      methodDesc['parameters']['body'] = {
          'description': 'The request body.',
          'type': 'object',
          'required': True,
          }
    argmap = {} # Map from method parameter name to query parameter name
    required_params = [] # Required parameters
    repeated_params = [] # Repeated parameters
    pattern_params = {} # Parameters that must match a regex
    query_params = [] # Parameters that will be used in the query string
    path_params = {} # Parameters that will be used in the base URL
    param_type = {} # The type of the parameter
    enum_params = {} # Allowable enumeration values for each parameter
    if 'parameters' in methodDesc:
      for arg, desc in methodDesc['parameters'].iteritems():
        param = key2param(arg)
        argmap[param] = arg
        if desc.get('pattern', ''):
          pattern_params[param] = desc['pattern']
        if desc.get('enum', ''):
          enum_params[param] = desc['enum']
        if desc.get('required', False):
          required_params.append(param)
        if desc.get('repeated', False):
          repeated_params.append(param)
        if desc.get('location') == 'query':
          query_params.append(param)
        if desc.get('location') == 'path':
          path_params[param] = param
        param_type[param] = desc.get('type', 'string')
    # Any variable referenced by the path URI Template is a path parameter,
    # even if the discovery document listed it under 'query'.
    for match in URITEMPLATE.finditer(pathUrl):
      for namematch in VARNAME.finditer(match.group(0)):
        name = key2param(namematch.group(0))
        path_params[name] = name
        if name in query_params:
          query_params.remove(name)

    def method(self, **kwargs):
      # Validate the keyword arguments against the discovery description.
      for name in kwargs.iterkeys():
        if name not in argmap:
          raise TypeError('Got an unexpected keyword argument "%s"' % name)
      for name in required_params:
        if name not in kwargs:
          raise TypeError('Missing required parameter "%s"' % name)
      for name, regex in pattern_params.iteritems():
        if name in kwargs:
          if re.match(regex, kwargs[name]) is None:
            raise TypeError(
                'Parameter "%s" value "%s" does not match the pattern "%s"' %
                (name, kwargs[name], regex))
      for name, enums in enum_params.iteritems():
        if name in kwargs:
          if kwargs[name] not in enums:
            raise TypeError(
                'Parameter "%s" value "%s" is not an allowed value in "%s"' %
                (name, kwargs[name], str(enums)))
      # Split arguments into query vs. path parameters, casting each value
      # to its schema-declared string form.
      actual_query_params = {}
      actual_path_params = {}
      for key, value in kwargs.iteritems():
        to_type = param_type.get(key, 'string')
        # For repeated parameters we cast each member of the list.
        if key in repeated_params and type(value) == type([]):
          cast_value = [_cast(x, to_type) for x in value]
        else:
          cast_value = _cast(value, to_type)
        if key in query_params:
          actual_query_params[argmap[key]] = cast_value
        if key in path_params:
          actual_path_params[argmap[key]] = cast_value
      body_value = kwargs.get('body', None)
      if self._developerKey:
        actual_query_params['key'] = self._developerKey
      # Let the model serialize the body and build the query string.
      headers = {}
      headers, params, query, body = self._model.request(headers,
          actual_path_params, actual_query_params, body_value)
      # TODO(ade) This exists to fix a bug in V1 of the Buzz discovery
      # document. Base URLs should not contain any path elements. If they do
      # then urlparse.urljoin will strip them out This results in an incorrect
      # URL which returns a 404
      url_result = urlparse.urlsplit(self._baseUrl)
      # NOTE(review): new_base_url is computed but never used below;
      # presumably it was meant to feed the urljoin — confirm intent.
      new_base_url = url_result[0] + '://' + url_result[1]
      expanded_url = uritemplate.expand(pathUrl, params)
      url = urlparse.urljoin(self._baseUrl,
                             url_result[2] + expanded_url + query)
      logging.info('URL being requested: %s' % url)
      # The request is not executed here; callers invoke .execute() on the
      # returned HttpRequest object.
      return self._requestBuilder(self._http,
                                  self._model.response,
                                  url,
                                  method=httpMethod,
                                  body=body,
                                  headers=headers,
                                  methodId=methodId)

    # Assemble a docstring for the generated method from the discovery
    # parameter descriptions.
    docs = [methodDesc.get('description', DEFAULT_METHOD_DOC), '\n\n']
    if len(argmap) > 0:
      docs.append('Args:\n')
    for arg in argmap.iterkeys():
      if arg in STACK_QUERY_PARAMETERS:
        continue
      repeated = ''
      if arg in repeated_params:
        repeated = ' (repeated)'
      required = ''
      if arg in required_params:
        required = ' (required)'
      paramdesc = methodDesc['parameters'][argmap[arg]]
      paramdoc = paramdesc.get('description', 'A parameter')
      paramtype = paramdesc.get('type', 'string')
      docs.append('  %s: %s, %s%s%s\n' % (arg, paramtype, paramdoc, required,
                                          repeated))
      enum = paramdesc.get('enum', [])
      enumDesc = paramdesc.get('enumDescriptions', [])
      if enum and enumDesc:
        docs.append('    Allowed values\n')
        for (name, desc) in zip(enum, enumDesc):
          docs.append('      %s - %s\n' % (name, desc))
    setattr(method, '__doc__', ''.join(docs))
    setattr(theclass, methodName, method)

  def createNextMethod(theclass, methodName, methodDesc, futureDesc):
    # Builds a <method>_next pagination helper from the 'future' document.
    methodId = methodDesc['id'] + '.next'

    def methodNext(self, previous):
      """Takes a single argument, 'previous', which is the results
      from the last call, and returns the next set of items
      in the collection.

      Returns None if there are no more items in
      the collection.
      """
      if futureDesc['type'] != 'uri':
        raise UnknownLinkType(futureDesc['type'])
      # Walk the 'location' key path into the previous response to find the
      # next-page URL; missing keys mean there are no more pages.
      try:
        p = previous
        for key in futureDesc['location']:
          p = p[key]
        url = p
      except (KeyError, TypeError):
        return None
      if self._developerKey:
        parsed = list(urlparse.urlparse(url))
        q = parse_qsl(parsed[4])
        q.append(('key', self._developerKey))
        parsed[4] = urllib.urlencode(q)
        url = urlparse.urlunparse(parsed)
      headers = {}
      headers, params, query, body = self._model.request(headers, {}, {}, None)
      logging.info('URL being requested: %s' % url)
      # NOTE(review): this GET is issued and its result discarded; the
      # HttpRequest returned below re-issues the same request when executed.
      # Confirm whether the direct call is intentional.
      resp, content = self._http.request(url, method='GET', headers=headers)
      return self._requestBuilder(self._http,
                                  self._model.response,
                                  url,
                                  method='GET',
                                  headers=headers,
                                  methodId=methodId)

    setattr(theclass, methodName, methodNext)

  # Add basic methods to Resource
  if 'methods' in resourceDesc:
    for methodName, methodDesc in resourceDesc['methods'].iteritems():
      if futureDesc:
        future = futureDesc['methods'].get(methodName, {})
      else:
        future = None
      createMethod(Resource, methodName, methodDesc, future)

  # Add in nested resources
  if 'resources' in resourceDesc:

    def createResourceMethod(theclass, methodName, methodDesc, futureDesc):
      # Nested resources become zero-argument methods returning a child
      # Resource built recursively from their description.
      def methodResource(self):
        return createResource(self._http, self._baseUrl, self._model,
                              self._requestBuilder, self._developerKey,
                              methodDesc, futureDesc)

      setattr(methodResource, '__doc__', 'A collection resource.')
      setattr(methodResource, '__is_resource__', True)
      setattr(theclass, methodName, methodResource)

    for methodName, methodDesc in resourceDesc['resources'].iteritems():
      if futureDesc and 'resources' in futureDesc:
        future = futureDesc['resources'].get(methodName, {})
      else:
        future = {}
      createResourceMethod(Resource, methodName, methodDesc, future)

  # Add <m>_next() methods to Resource
  if futureDesc and 'methods' in futureDesc:
    for methodName, methodDesc in futureDesc['methods'].iteritems():
      if 'next' in methodDesc and methodName in resourceDesc['methods']:
        createNextMethod(Resource, methodName + '_next',
                         resourceDesc['methods'][methodName],
                         methodDesc['next'])

  return Resource()
| Python |
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for OAuth.
Utilities for making it easier to work with OAuth 1.0 credentials.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import pickle
import threading
from apiclient.oauth import Storage as BaseStorage
class Storage(BaseStorage):
  """Store and retrieve a single credential to and from a file."""

  def __init__(self, filename):
    """Construct a file-backed Storage.

    Args:
      filename: string, path of the file used to persist the credential.
    """
    self._filename = filename
    self._lock = threading.Lock()

  def get(self):
    """Retrieve Credential from file.

    Returns:
      apiclient.oauth.Credentials, or None if the file is missing or
      unreadable (best-effort read).
    """
    self._lock.acquire()
    try:
      f = open(self._filename, 'r')
      credentials = pickle.loads(f.read())
      f.close()
      credentials.set_store(self.put)
    except:
      # Deliberate best-effort: any failure (missing file, bad pickle, ...)
      # yields None rather than propagating.
      credentials = None
    finally:
      self._lock.release()
    return credentials

  def put(self, credentials):
    """Write a pickled Credentials to file.

    Args:
      credentials: Credentials, the credentials to store.
    """
    self._lock.acquire()
    # The original released the lock outside try/finally; an IOError while
    # opening or writing would have left the lock held forever.
    try:
      f = open(self._filename, 'w')
      f.write(pickle.dumps(credentials))
      f.close()
    finally:
      self._lock.release()
| Python |
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import apiclient
import base64
import pickle
from django.db import models
class OAuthCredentialsField(models.Field):
  """Django model field persisting an apiclient.oauth.Credentials.

  The credential is pickled and base64-encoded into a VARCHAR column.
  """

  __metaclass__ = models.SubfieldBase

  def db_type(self):
    """Column type used to store the encoded credential."""
    return 'VARCHAR'

  def to_python(self, value):
    """Decode a database value back into a Credentials object."""
    if value is None or isinstance(value, apiclient.oauth.Credentials):
      return value
    return pickle.loads(base64.b64decode(value))

  def get_db_prep_value(self, value):
    """Encode a Credentials object for storage in the database."""
    return base64.b64encode(pickle.dumps(value))
class FlowThreeLeggedField(models.Field):
  """Django model field persisting an apiclient.oauth.FlowThreeLegged.

  The flow object is pickled and base64-encoded into a VARCHAR column.
  """

  __metaclass__ = models.SubfieldBase

  def db_type(self):
    """Column type used to store the encoded flow."""
    return 'VARCHAR'

  def to_python(self, value):
    """Decode a database value back into a FlowThreeLegged object."""
    # Removed a leftover debug print that wrote every value to stdout.
    if value is None:
      return None
    if isinstance(value, apiclient.oauth.FlowThreeLegged):
      return value
    return pickle.loads(base64.b64decode(value))

  def get_db_prep_value(self, value):
    """Encode a FlowThreeLegged object for storage in the database."""
    return base64.b64encode(pickle.dumps(value))
| Python |
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for Google App Engine
Utilities for making it easier to use the
Google API Client for Python on Google App Engine.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import pickle
from google.appengine.ext import db
from apiclient.oauth import OAuthCredentials
from apiclient.oauth import FlowThreeLegged
class FlowThreeLeggedProperty(db.Property):
  """Utility property that allows easy
  storage and retrieval of an
  apiclient.oauth.FlowThreeLegged"""

  # Tell what the user type is.
  data_type = FlowThreeLegged

  # For writing to datastore.
  def get_value_for_datastore(self, model_instance):
    """Pickle the flow into a Blob for datastore storage."""
    flow = super(FlowThreeLeggedProperty,
                 self).get_value_for_datastore(model_instance)
    return db.Blob(pickle.dumps(flow))

  # For reading from datastore.
  def make_value_from_datastore(self, value):
    """Unpickle a stored Blob back into a FlowThreeLegged (or None)."""
    if value is None:
      return None
    return pickle.loads(value)

  def validate(self, value):
    """Reject values that are neither None nor FlowThreeLegged.

    Raises:
      db.BadValueError: if value has the wrong type.
    """
    if value is not None and not isinstance(value, FlowThreeLegged):
      # BadValueError lives in google.appengine.ext.db; the previous bare
      # name was a NameError at runtime.
      raise db.BadValueError('Property %s must be convertible '
                             'to a FlowThreeLegged instance (%s)' %
                             (self.name, value))
    return super(FlowThreeLeggedProperty, self).validate(value)

  def empty(self, value):
    """Treat falsy values as empty for datastore purposes."""
    return not value
class OAuthCredentialsProperty(db.Property):
  """Utility property that allows easy
  storage and retrieval of
  apiclient.oauth.OAuthCredentials
  """

  # Tell what the user type is.
  data_type = OAuthCredentials

  # For writing to datastore.
  def get_value_for_datastore(self, model_instance):
    """Pickle the credential into a Blob for datastore storage."""
    cred = super(OAuthCredentialsProperty,
                 self).get_value_for_datastore(model_instance)
    return db.Blob(pickle.dumps(cred))

  # For reading from datastore.
  def make_value_from_datastore(self, value):
    """Unpickle a stored Blob back into an OAuthCredentials (or None)."""
    if value is None:
      return None
    return pickle.loads(value)

  def validate(self, value):
    """Reject values that are neither None nor OAuthCredentials.

    Raises:
      db.BadValueError: if value has the wrong type.
    """
    if value is not None and not isinstance(value, OAuthCredentials):
      # BadValueError lives in google.appengine.ext.db; the previous bare
      # name was a NameError at runtime.
      raise db.BadValueError('Property %s must be convertible '
                             'to an OAuthCredentials instance (%s)' %
                             (self.name, value))
    return super(OAuthCredentialsProperty, self).validate(value)

  def empty(self, value):
    """Treat falsy values as empty for datastore purposes."""
    return not value
class StorageByKeyName(object):
  """Store and retrieve a single credential to and from
  the App Engine datastore.

  This Storage helper presumes the Credentials
  have been stored as a CredentialsProperty
  on a datastore model class, and that entities
  are stored by key_name.
  """

  def __init__(self, model, key_name, property_name):
    """Constructor for Storage.

    Args:
      model: db.Model, model class
      key_name: string, key name for the entity that has the credentials
      property_name: string, name of the property that is a
          CredentialsProperty
    """
    self.model = model
    self.key_name = key_name
    self.property_name = property_name

  def get(self):
    """Retrieve Credential from datastore.

    Returns:
      Credentials
    """
    row = self.model.get_or_insert(self.key_name)
    credential = getattr(row, self.property_name)
    # Wire the credential back to this store so refreshes are persisted.
    if credential and hasattr(credential, 'set_store'):
      credential.set_store(self.put)
    return credential

  def put(self, credentials):
    """Write a Credentials to the datastore.

    Args:
      credentials: Credentials, the credentials to store.
    """
    row = self.model.get_or_insert(self.key_name)
    setattr(row, self.property_name, credentials)
    row.put()
| Python |
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command-line tools for authenticating via OAuth 1.0
Do the OAuth 1.0 Three Legged Dance for
a command line application. Stores the generated
credentials in a common file that is used by
other example apps in the same directory.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
__all__ = ["run"]
import BaseHTTPServer
import logging
import socket
import sys
from optparse import OptionParser
from apiclient.oauth import RequestError
try:
from urlparse import parse_qsl
except ImportError:
from cgi import parse_qsl
class ClientRedirectServer(BaseHTTPServer.HTTPServer):
  """A server to handle OAuth 1.0 redirects back to localhost.

  Waits for a single request and parses the query parameters
  into query_params and then stops serving.
  """
  # Class-level default; ClientRedirectHandler overwrites this on the
  # instance (s.server.query_params = ...) when the redirect arrives.
  query_params = {}
class ClientRedirectHandler(BaseHTTPServer.BaseHTTPRequestHandler):
  """A handler for OAuth 1.0 redirects back to localhost.

  Waits for a single request and parses the query parameters
  into the servers query_params and then stops serving.
  """

  def do_GET(self):
    """Handle a GET request.

    Parses the query parameters and prints a message
    if the flow has completed. Note that we can't detect
    if an error occurred.
    """
    self.send_response(200)
    self.send_header("Content-type", "text/html")
    self.end_headers()
    # Everything after the first '?' is the redirect's query string.
    query = self.path.split('?', 1)[-1]
    self.server.query_params = dict(parse_qsl(query))
    self.wfile.write("<html><head><title>Authentication Status</title></head>")
    self.wfile.write("<body><p>The authentication flow has completed.</p>")
    self.wfile.write("</body></html>")

  def log_message(self, format, *args):
    """Do not log messages to stdout while running as command line program."""
    pass
def run(flow, storage):
  """Core code for a command-line application.

  Args:
    flow: Flow, an OAuth 1.0 Flow to step through.
    storage: Storage, a Storage to store the credential in.

  Returns:
    Credentials, the obtained credential.

  Exceptions:
    RequestError: if step2 of the flow fails.
  """
  parser = OptionParser()
  parser.add_option("-p", "--no_local_web_server", dest="localhost",
      action="store_false",
      default=True,
      help="Do not run a web server on localhost to handle redirect URIs")
  parser.add_option("-w", "--local_web_server", dest="localhost",
      action="store_true",
      default=True,
      help="Run a web server on localhost to handle redirect URIs")
  (options, args) = parser.parse_args()
  host_name = 'localhost'
  port_numbers = [8080, 8090]
  # Try each port in turn; if neither can be bound, fall back to the
  # out-of-band ('oob') flow where the user pastes the verifier manually.
  if options.localhost:
    server_class = BaseHTTPServer.HTTPServer
    try:
      port_number = port_numbers[0]
      httpd = server_class((host_name, port_number), ClientRedirectHandler)
    except socket.error:
      port_number = port_numbers[1]
      try:
        httpd = server_class((host_name, port_number), ClientRedirectHandler)
      except socket.error:
        options.localhost = False
  if options.localhost:
    oauth_callback = 'http://%s:%s/' % (host_name, port_number)
  else:
    oauth_callback = 'oob'
  authorize_url = flow.step1_get_authorize_url(oauth_callback)
  print 'Go to the following link in your browser:'
  print authorize_url
  print
  if options.localhost:
    # Block until the provider redirects back to our local server.
    httpd.handle_request()
    if 'error' in httpd.query_params:
      sys.exit('Authentication request was rejected.')
    # NOTE(review): if the redirect carries neither 'error' nor
    # 'oauth_verifier', 'code' is left unbound and step2_exchange below
    # raises NameError — confirm whether that case can occur.
    if 'oauth_verifier' in httpd.query_params:
      code = httpd.query_params['oauth_verifier']
  else:
    accepted = 'n'
    while accepted.lower() == 'n':
      accepted = raw_input('Have you authorized me? (y/n) ')
    code = raw_input('What is the verification code? ').strip()
  try:
    credentials = flow.step2_exchange(code)
  except RequestError:
    sys.exit('The authentication has failed.')
  # Persist the credential and wire it back to the store so refreshes are
  # saved automatically.
  storage.put(credentials)
  credentials.set_store(storage.put)
  print "You have successfully authenticated."
  return credentials
| Python |
#!/usr/bin/python2.4
#
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Errors for the library.
All exceptions defined by the library
should be defined in this file.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
from anyjson import simplejson
class Error(Exception):
  """Base error for this module.

  All exceptions raised by this package derive from this class, so callers
  can catch them with a single except clause.
  """
class HttpError(Error):
  """HTTP data was invalid or unexpected."""

  def __init__(self, resp, content, uri=None):
    """Store the failed response, its body, and optionally the request URI."""
    self.resp = resp
    self.content = content
    self.uri = uri

  def _get_reason(self):
    """Calculate the reason for the error from the response content.
    """
    # Non-JSON responses fall back to the transport-level reason phrase.
    if not self.resp.get('content-type', '').startswith('application/json'):
      return self.resp.reason
    try:
      return simplejson.loads(self.content)['error']['message']
    except (ValueError, KeyError):
      # Unparseable or unexpectedly-shaped body: show it verbatim.
      return self.content

  def __repr__(self):
    if self.uri:
      return '<HttpError %s when requesting %s returned "%s">' % (
          self.resp.status, self.uri, self._get_reason())
    return '<HttpError %s "%s">' % (self.resp.status, self._get_reason())

  __str__ = __repr__
class InvalidJsonError(Error):
  """The JSON returned could not be parsed."""
class UnknownLinkType(Error):
  """Link type unknown or unexpected."""
| Python |
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility module to import a JSON module
Hides all the messy details of exactly where
we get a simplejson module from.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
try: # pragma: no cover
import simplejson
except ImportError: # pragma: no cover
try:
# Try to import from django, should work on App Engine
from django.utils import simplejson
except ImportError:
# Should work for Python2.6 and higher.
import json as simplejson
| Python |
Subsets and Splits
SQL Console for ajibawa-2023/Python-Code-Large
Provides a useful breakdown of language distribution in the training data, showing which languages have the most samples and helping identify potential imbalances across different language groups.