code stringlengths 1 1.72M | language stringclasses 1 value |
|---|---|
"""Module to implement the RadixSortManagers for CPU and GPU"""
# OpenCL conditional imports
import pysph.solver.cl_utils as clu
if clu.HAS_CL:
import pyopencl as cl
mf = cl.mem_flags
import numpy
from scanClass import Scan
class AMDRadixSort:
    """AMD's OpenCL implementation of the radix sort.

    The C++ code for the implementation can be found in the samples
    directory of the AMD-APP-SDK (2.5)

    This implementation assumes that each thread works on 256
    elements. This is also the minimum number of elements required.
    The keys are assumed to be 32 bit unsigned integers and we sort
    them 8 bits at a time. As a result, the number of histogram
    bins/buckets for this implementation is also 256.

    Since we expect this radix sort routine to be used to sort
    particle cell indices as keys and particle indices as values,
    the values are also assumed to be unsigned integers.
    """
    def __init__(self, radix=8):
        """Constructor.

        Parameters:
        ------------

        radix : int (8)
            The number of bits per pass of the radix sort.
        """
        # the following variables are analogous to the AMD's variables
        self.radix = radix          # number of bits at a time for each pass
        self.radices = (1<<radix)   # num of elements handled by each work-item

        # the group size could be changed to any convenient size
        self.group_size = 64

    def initialize(self, keys, values=None, context=None):
        """Initialize the radix sort manager.

        Parameters:
        ------------

        keys : numpy.ndarray (uint32)
            The keys on which to sort.

        values : numpy.ndarray (uint32)
            Optional values permuted along with the keys. When
            omitted, a keys-only sort is performed on a copy of the
            keys.

        context : pyopencl.Context
            Optional OpenCL context to reuse.

        Raises a RuntimeError when keys and values differ in length.
        """
        # store the keys and values
        self.keys = keys

        # a keys only sort treats the values as keys
        if values is None:
            self.values = keys.copy()
        else:
            nvalues = len(values); nkeys = len(keys)
            if not nvalues == nkeys:
                raise RuntimeError( "len(keys) %d != len(values) %d"%(nkeys,
                                                                      nvalues) )
            self.values = values

        # number of elements
        self.n = len( keys )

        # pad the input to a power-of-2 length
        self._setup()

        # OpenCL setup
        self._setup_cl(context)

    def sort(self):
        """The main sorting routine.

        Four 8-bit passes are performed. For each pass the per-thread
        histograms are computed on the device, scanned (exclusive
        prefix sum) on the host, and then used by the permute kernel
        to shuffle the data on the device.
        """
        ctx = self.context
        q = self.queue

        radices = self.radices
        ngroups = self.num_groups
        groupsize = self.group_size
        histograms = self.histograms

        for _bits in range(0, 32, 8):
            bits = numpy.int32(_bits)

            # compute the histogram on the device
            self._histogram( bits )

            # Scan the histogram on the host. The loop order (radix
            # outermost) makes all counts for bucket `i`, across all
            # threads, precede the counts for bucket i+1.
            _sum = 0
            for i in range(radices):
                for j in range(ngroups):
                    for k in range(groupsize):
                        index = j * groupsize * radices + k * radices + i
                        value = histograms[index]
                        histograms[index] = _sum
                        _sum += value

            # permute the data on the device
            self._permute( bits )

            # current output becomes input for the next pass
            self._keys[:] = self.sortedkeys[:]
            self._values[:] = self.sortedvalues[:]

        # read only the original un-padded data into keys
        self.keys[:] = self._keys[:self.n]
        self.values[:] = self._values[:self.n]

    def _histogram(self, bits):
        """Launch the histogram kernel.

        Each thread loads its work region (256 values) into shared
        memory and computes the histogram/frequency of occurrence of
        each element. The implementation sorts the 32 bit keys 8 bits
        at a time, so the histogram bins/buckets per thread are also
        256.

        After the kernel launch, the computed thread histograms are
        read back to the host, where they will be scanned.
        """
        ctx = self.context
        q = self.queue

        # global/local sizes: one work-item per 256 (radices) elements
        global_sizes = (self.nelements/self.radices,)
        local_sizes = (self.group_size,)

        # The unsorted data is already on the device in dkeys; it is
        # refreshed device-side at the end of each permute pass.
        #clu.enqueue_copy(q, src=self._keys, dst=self.dkeys)

        # allocate the local memory for the histogram kernel
        # (group_size * radices counters, 2 bytes each)
        local_mem_size = self.group_size * self.radices * 2
        local_mem = cl.LocalMemory(size=local_mem_size)

        # enqueue the kernel for execution
        self.program.histogram(q, global_sizes, local_sizes,
                               self.dkeys, self.dhistograms,
                               bits, local_mem).wait()

        # read the result to the host buffer
        clu.enqueue_copy(q, src=self.dhistograms, dst=self.histograms)

    def _permute(self, bits):
        """Launch the permute kernel.

        Using the host-scanned thread histograms, this kernel shuffles
        the keys and values to perform the actual sort.

        We first copy the scanned histograms to the device, compute
        the local mem size and then launch the kernel. After the
        kernel launch, the sorted keys and values are read back to the
        host for the next pass.
        """
        ctx = self.context
        q = self.queue

        # copy the scanned histograms to the device
        clu.enqueue_copy(q, src=self.histograms,
                         dst=self.dscanedhistograms)

        # global and local sizes
        global_sizes = (self.nelements/self.radices,)
        local_sizes = (self.group_size,)

        # allocate local memory for the permute kernel launch
        local_mem_size = self.group_size * self.radices * 2
        local_mem = cl.LocalMemory(size=local_mem_size)

        # enqueue the kernel for execution
        self.program.permute(q, global_sizes, local_sizes,
                             self.dkeys, self.dvalues,
                             self.dscanedhistograms,
                             bits, local_mem,
                             self.dsortedkeys, self.dsortedvalues).wait()

        # read sorted results back to the host
        clu.enqueue_copy(q, src=self.dsortedkeys, dst=self.sortedkeys)
        clu.enqueue_copy(q, src=self.dsortedvalues, dst=self.sortedvalues)

        # device-to-device copy so the next pass reads the sorted data
        clu.enqueue_copy(q, src=self.dsortedkeys, dst=self.dkeys)
        clu.enqueue_copy(q, src=self.dsortedvalues, dst=self.dvalues)

    def _setup(self):
        """Prepare the data for the algorithm.

        The implementation requires the input array to have a length
        equal to a power of 2. We test for this condition and pad the
        keys with the special mask value (1<<32 - 1) which has a bit
        pattern of all 1's.

        This particular padding and the ordered nature of the radix
        sort results in these padded dummy keys going to the end so we
        can simply ignore them.
        """
        # check the length of the input arrays
        if not clu.ispowerof2(self.n):
            n = clu.round_up(self.n)
            pad = numpy.ones(n - self.n, numpy.int32) * clu.uint32mask()

            # _keys and _values are the padded arrays we use internally
            self._keys = numpy.concatenate( (self.keys,
                                             pad) ).astype(numpy.uint32)
            self._values = numpy.concatenate( (self.values,
                                               pad) ).astype(numpy.uint32)
        else:
            self._keys = self.keys
            self._values = self.values

        # now store the number of elements and num work groups
        self.nelements = len(self._keys)
        self.num_groups = self.nelements/(self.group_size * self.radices)

    def _setup_cl(self, context=None):
        """OpenCL setup: context, queue, buffers and program."""
        if context is None:
            self.context = context = clu.create_some_context()
        else:
            self.context = context

        self.queue = queue = cl.CommandQueue(context)

        # allocate device memory
        self._allocate_memory()

        # create the program
        self._create_program()

    def _allocate_memory(self):
        """Allocate OpenCL work buffers."""
        ctx = self.context

        # first allocate the keys and values on the device
        # these serve as the unsorted keys and values on the device
        self.dkeys = cl.Buffer(ctx, mf.READ_WRITE|mf.COPY_HOST_PTR,
                               hostbuf=self._keys)
        self.dvalues = cl.Buffer(ctx, mf.READ_WRITE|mf.COPY_HOST_PTR,
                                 hostbuf=self._values)

        # Output from the histogram kernel: each thread writes its
        # histogram/count for the 256 elements it is checking. Thus,
        # the size of this buffer should be:
        # numgroups * local_size * num_elements_per_work_item
        size = self.group_size * self.radices * self.num_groups
        self.histograms = numpy.ones(size, numpy.uint32)
        self.dhistograms = cl.Buffer(ctx, mf.READ_WRITE, size=size*4)

        # Input for the permute kernel: the scanned histogram output,
        # so the size requirement is the same.
        self.dscanedhistograms = cl.Buffer(ctx, mf.READ_WRITE, size=size*4)

        # the final output or the sorted output.
        # This should obviously be of size num_elements
        self.sortedkeys = numpy.ones(self.nelements, numpy.uint32)
        self.dsortedkeys = cl.Buffer(ctx, mf.READ_WRITE, size=self.nelements*4)

        self.sortedvalues = numpy.ones(self.nelements, numpy.uint32)
        self.dsortedvalues = cl.Buffer(ctx, mf.READ_WRITE,
                                       size=self.nelements*4)

    def _create_program(self):
        """Read the OpenCL kernel file and build."""
        src_file = clu.get_pysph_root() + '/base/RadixSort_Kernels.cl'
        src = open(src_file).read()
        self.program = cl.Program(self.context, src).build()

    def _sort_cpu(self, keys, values=None):
        """Perform a reference radix sort for verification on the CPU.

        The reference implementation is analogous to the AMD-APP-SDK's
        reference host implementation. `keys` (and `values`) are
        sorted in place; each pass is stable, so the full LSD sort is
        stable as well.
        """
        if values is None:
            values = keys.copy()

        n = len(keys)
        sortedkeys = numpy.ones(n, numpy.uint32)
        sortedvalues = numpy.ones(n, numpy.uint32)

        mask = self.radices - 1

        # allocate the histogram buffer. This is simply a buffer of
        # length RADICES (256)
        histograms = numpy.zeros(self.radices, numpy.uint32)

        # Sort the data
        for bits in range(0, 32, self.radix):
            # initialize the histograms to 0
            histograms[:] = 0

            # calculate histograms for all elements
            for i in range(n):
                element = keys[i]
                val = (element >> bits) & mask
                histograms[val] += 1

            # scan the histograms (exclusive prefix sum).
            # FIX: the running sum is an integer count; it was
            # previously initialized to the float 0.0 and silently
            # coerced into the uint32 histogram on every store.
            _sum = 0
            for i in range(self.radices):
                val = histograms[i]
                histograms[i] = _sum
                _sum += val

            # permute the keys and values
            for i in range(n):
                element = keys[i]
                val = ( element >> bits ) & mask
                index = histograms[val]

                sortedkeys[index] = keys[i]
                sortedvalues[index] = values[i]

                # bump the write offset for this bucket (stable order)
                histograms[val] = index + 1

            # swap the buffers for the next pass
            keys[:] = sortedkeys[:]
            values[:] = sortedvalues[:]
class NvidiaRadixSort:
    """
    LICENSE

    NOTE(review): the original docstring holds only this LICENSE
    placeholder. The kernel names (radixSortBlocksKeysOnly,
    findRadixOffsets, reorderDataKeysOnly) and the four-step
    structure match the NVIDIA OpenCL SDK radix sort sample --
    presumably this class is derived from it; confirm and fill in
    the actual license text.

    Keys are 32 bit unsigned integers sorted `bitStep` (4) bits per
    pass; values are moved along with the keys by the kernels.
    """
    def __init__(self, radix=8):
        """Constructor.

        Parameters:
        ------------

        radix : int (8)
            The number of bits per pass of the radix sort.
        """
        # the following variables are analogous to the AMD's
        # variables
        self.radix = radix # number of bits at a time for each pass
        self.radices = (1<<radix) # num of elements handled by each work-item

        # the group size could be changed to any convenient size
        self.group_size = 64

    def initialize(self, keys, values=None, context=None):
        """Initialize the radix sort manager.

        `keys` (and optional `values`, which must match in length)
        are stored, padded to a power-of-2 length and copied to the
        device; raises RuntimeError on a length mismatch.
        """
        # store the keys and values
        self.keys = keys

        # a keys only sort treats the values as keys
        if values is None:
            self.values = keys.copy()
        else:
            nvalues = len(values); nkeys = len(keys)
            if not nvalues == nkeys:
                raise RuntimeError( "len(keys) %d != len(values) %d"%(nkeys,
                                                                      nvalues) )
            self.values = values

        # number of elements
        self.n = len( keys )

        # pad etc
        self._setup()

        # OpenCL setup
        self._setup_cl(context)

    def sort(self):
        """Sort on the device and read the result back to the host."""
        keyBits = self.keyBits
        self.radixSortKeysOnly(keyBits)

        # read the sorted keys/values back to the host
        clu.enqueue_copy(self.queue, src=self.dkeys, dst=self.sortedkeys)
        clu.enqueue_copy(self.queue, src=self.dvalues, dst=self.sortedvalues)

        # drop the padding introduced in _setup
        self.keys[:] = self.sortedkeys[:self.n]
        self.values[:] = self.sortedvalues[:self.n]

    def radixSortKeysOnly(self, keyBits):
        """Run successive bitStep-bit passes until keyBits bits are
        sorted."""
        i = numpy.uint32(0)
        bitStep = self.bitStep
        while (keyBits > i*bitStep):
            self.radixSortStepKeysOnly(bitStep, i*bitStep)
            i+=numpy.uint32(1)

    def radixSortStepKeysOnly(self, nbits, startbit):
        """One pass of the four-step radix sort over `nbits` bits
        starting at `startbit`."""
        nelements = self.nelements

        # create scan object
        scan = Scan(self.context,
                    self.queue,
                    nelements)

        # 4 step algo
        ctaSize = self.ctaSize

        # STEP I {radixSortBlocksKeysOnlyOCL}
        totalBlocks = numpy.uint32(nelements/4/ctaSize)
        globalWorkSize = (numpy.int(ctaSize*totalBlocks),)
        localWorkSize = (numpy.int(ctaSize),)

        # create Local Memory
        self.local1 = cl.LocalMemory(size=numpy.int(4 * ctaSize * self.size_uint))

        self.program.radixSortBlocksKeysOnly(self.queue, globalWorkSize, localWorkSize,
                                             self.dkeys,
                                             self.dsortedkeys,
                                             self.dvalues,
                                             self.dsortedvalues,
                                             nbits,
                                             startbit,
                                             nelements,
                                             totalBlocks,
                                             self.local1).wait()

        # STEP II : per-block radix counters and offsets
        totalBlocks = numpy.uint32(nelements/2/ctaSize)
        globalWorkSize = (numpy.int(ctaSize*totalBlocks),)
        localWorkSize = (numpy.int(ctaSize),)

        # create Local Memory
        self.local2 = cl.LocalMemory(size=numpy.int(2 * ctaSize * self.size_uint))

        self.program.findRadixOffsets(self.queue, globalWorkSize, localWorkSize,
                                      self.dsortedkeys,
                                      self.mCounters,
                                      self.mBlockOffsets,
                                      startbit,
                                      nelements,
                                      totalBlocks,
                                      self.local2).wait()

        # STEP III : exclusive scan of the per-block counters
        scan.scanExclusiveLarge(self.mCountersSum, self.mCounters, 1, nelements/2/ctaSize*16)

        # STEP IV : scatter the data to its globally sorted position
        totalBlocks = numpy.uint32(nelements/2/ctaSize)
        globalWorkSize = (numpy.int(ctaSize*totalBlocks),)
        localWorkSize = (numpy.int(ctaSize),)

        # create Local Memory
        self.local3 = cl.LocalMemory(size=numpy.int(2 * ctaSize * self.size_uint))
        self.local4 = cl.LocalMemory(size=numpy.int(2 * ctaSize * self.size_uint))

        self.program.reorderDataKeysOnly(self.queue, globalWorkSize, localWorkSize,
                                         self.dkeys,
                                         self.dsortedkeys,
                                         self.dvalues,
                                         self.dsortedvalues,
                                         self.mBlockOffsets,
                                         self.mCountersSum,
                                         self.mCounters,
                                         startbit,
                                         nelements,
                                         totalBlocks,
                                         self.local3,
                                         self.local4).wait()

    def _setup(self):
        """Prepare the data for the algorithm.

        The implementation requires the input array to have a length
        equal to a power of 2. We test for this condition and pad the
        keys with the special mask value (1<<32 - 1) which has a bit
        pattern of all 1's.

        This particular padding and the ordered nature of the radix
        sort results in these padded dummy keys going to the end so we
        can simply ignore them.
        """
        # check the length of the input arrays
        if not clu.ispowerof2(self.n):
            n = clu.round_up(self.n)
            pad = numpy.ones(n - self.n, numpy.int32) * clu.uint32mask()

            # _keys and _values are the padded arrays we use internally
            self._keys = numpy.concatenate( (self.keys,
                                             pad) ).astype(numpy.uint32)
            self._values = numpy.concatenate( (self.values,
                                               pad) ).astype(numpy.uint32)
        else:
            self._keys = self.keys
            self._values = self.values

        # now store the number of elements and num work groups
        self.nelements = numpy.uint32(len(self._keys))

    def _setup_cl(self, context=None):
        """ OpenCL setup: context, queue, buffers and program. """
        if context is None:
            self.context = context = clu.create_some_context()
        else:
            self.context = context

        self.queue = queue = cl.CommandQueue(context)

        # allocate device memory
        self._allocate_memory()

        # create the program
        self._create_program()

    def _allocate_memory(self):
        """Allocate OpenCL work buffers."""
        ctx = self.context
        nelements = self.nelements

        WARP_SIZE = 32
        self.size_uint = size_uint = numpy.uint32(0).nbytes

        # sort the full 32 key bits, 4 bits per pass
        self.keyBits = keybits = numpy.uint32(32)
        self.bitStep = numpy.uint32(4)

        # choose the work-group (CTA) size based on the problem size
        # NOTE(review): the message says 4096 but the check actually
        # rejects nelements <= 2048 -- confirm the intended minimum.
        if (nelements>=4096*4):
            ctaSize = 128
        elif(nelements<4096*4 and nelements>2048):
            ctaSize = 64
        else:
            raise RuntimeError( "particles < 4096")
        self.ctaSize = ctaSize

        # number of blocks, rounded up for a partial final block
        if ((nelements % (ctaSize * 4)) == 0):
            numBlocks = numpy.uint32(nelements/(ctaSize * 4))
        else:
            numBlocks = numpy.uint32(nelements/(ctaSize * 4) + 1)

        # first allocate the keys and values on the device
        # these serve as the unsorted keys and values on the device
        self.dkeys = cl.Buffer(ctx, mf.READ_WRITE|mf.COPY_HOST_PTR,
                               hostbuf=self._keys)
        self.dvalues = cl.Buffer(ctx, mf.READ_WRITE|mf.COPY_HOST_PTR,
                                 hostbuf=self._values)

        # the final output or the sorted output.
        # This should obviously be of size num_elements
        self.sortedkeys = numpy.ones(self.nelements, dtype=numpy.uint32)
        self.sortedvalues = numpy.ones(self.nelements, dtype=numpy.uint32)

        # Buffers: scratch arrays used by the four-step kernels
        self.dsortedkeys = cl.Buffer(ctx, mf.READ_WRITE, numpy.int(size_uint * self.nelements))
        self.dsortedvalues = cl.Buffer(ctx, mf.READ_WRITE, numpy.int(size_uint * self.nelements))
        self.mCounters = cl.Buffer(ctx, mf.READ_WRITE, numpy.int(WARP_SIZE * size_uint * numBlocks))
        self.mCountersSum = cl.Buffer(ctx, mf.READ_WRITE, numpy.int(WARP_SIZE * size_uint * numBlocks))
        self.mBlockOffsets = cl.Buffer(ctx, mf.READ_WRITE, numpy.int(WARP_SIZE * size_uint * numBlocks))

    def _create_program(self):
        """Read the OpenCL kernel file and build."""
        src_file = clu.get_pysph_root() + '/base/RadixSortVal.cl'
        src = open(src_file).read()
        self.program = cl.Program(self.context, src).build()
| Python |
import numpy
from pysph.solver.cl_utils import cl_read, get_real, HAS_CL, get_pysph_root,\
create_some_context, enqueue_copy
import pysph.solver.cl_utils as clu
if HAS_CL:
import pyopencl as cl
mf = cl.mem_flags
# Cython functions for neighbor list construction
from nnps_util import cbin, unflatten
from point import Point
from cell import py_find_cell_id
# radix sort class
from radix_sort import AMDRadixSort, NvidiaRadixSort
class DomainManagerType:
    """Integer constants identifying the available DomainManager
    implementations."""
    # plain enumeration values; compared by identity elsewhere
    DomainManager = 0
    LinkedListManager = 1
    RadixSortManager = 2
class DomainManager:
    """Base class for the spatial indexing managers.

    A DomainManager holds a list of ParticleArrays, validates that
    they are mutually consistent (unique names, common OpenCL
    precision) and optionally sets up an OpenCL context, command
    queue and per-array device buffers. Subclasses implement the
    actual indexing scheme (linked list, radix sort, ...).
    """
    def __init__(self, arrays, context=None, with_cl=True, device='CPU'):
        """Constructor.

        Parameters:
        ------------

        arrays : list
            The ParticleArrays to manage. Must be non-empty with
            unique names and a common cl_precision.

        context : pyopencl.Context
            Optional OpenCL context to reuse.

        with_cl : bool
            Use OpenCL (requires PyOpenCL to be installed).

        device : str
            'CPU' or 'GPU' -- device type used when a new context
            must be created.

        Raises RuntimeError on empty input, duplicate array names,
        mixed precisions or a missing PyOpenCL installation.
        """
        if len(arrays) == 0:
            raise RuntimeError("No Arrays provided!")

        self.arrays = arrays
        self.narrays = narrays = len(arrays)

        # Check that the arrays have unique names and a common
        # precision. FIX: the original only compared *adjacent*
        # arrays, which missed non-adjacent duplicates/mismatches;
        # we now check every array against all previously seen names
        # and against the first array's precision.
        if narrays > 1:
            names = set()
            for i in range(narrays):
                if arrays[i].name in names:
                    msg = "You must provide arrays with unique names!"
                    raise RuntimeError(msg)
                names.add(arrays[i].name)

                if arrays[i].cl_precision != arrays[0].cl_precision:
                    msg = "Arrays cannot have different precision!"
                    raise RuntimeError(msg)

        # set the cl_precision
        self.cl_precision = arrays[0].cl_precision

        # setup OpenCL
        if with_cl:
            if HAS_CL:
                self.with_cl = True
                self._setup_cl(context, device)
            else:
                raise RuntimeError("PyOpenCL not found!")
        else:
            self.with_cl = False

    #######################################################################
    # public interface
    #######################################################################
    def update(self):
        """Update the indexing structure. Overridden by subclasses."""
        pass

    #######################################################################
    # object interface
    #######################################################################
    def __iter__(self):
        """The Domain manager produces an iterator for all its data.

        This is needed as the function that will ask for cell
        neighbors should be agnostic about the DomainManager type and
        simply requires a means to iterate through its data.
        """
        return self

    def next(self):
        # Python 2 iterator protocol; subclasses provide the actual
        # iteration over cells.
        raise RuntimeError("Do not iterate over the DomainManager base class!")

    ###########################################################################
    # non-public interface
    ###########################################################################
    def _setup_cl(self, context=None, device=None):
        """OpenCL setup: context, command queue and the per-array
        device buffers."""
        if not context:
            if device=='GPU' or device=='gpu':
                self.context = context = clu.create_context_from_gpu()
            else:
                self.context = context = clu.create_context_from_cpu()
        else:
            self.context = context

        self.queue = queue = cl.CommandQueue(context)

        # allocate the particle array device buffers
        for i in range(self.narrays):
            pa = self.arrays[i]
            pa.setup_cl(context, queue)

        # create the program
        self._setup_program()

    def _find_bounds(self):
        """Find the bounds for the particle arrays.

        The bounds calculated are the simulation cube, defined by the
        minimum and maximum extents of the particle arrays and the
        maximum smoothing length which is used for determining a safe
        cell size for binning.
        """
        inf = numpy.inf

        mx, my, mz = inf, inf, inf
        Mx, My, Mz = -inf, -inf, -inf
        Mh = 0.0

        # update the minimum and maximum for the particle arrays
        for pa in self.arrays:
            pa.read_from_buffer()
            pa.update_min_max(props=['x','y','z','h'])

            if pa.properties['x'].minimum < mx:
                mx = get_real( pa.properties['x'].minimum, self.cl_precision )

            if pa.properties['y'].minimum < my:
                my = get_real( pa.properties['y'].minimum, self.cl_precision )

            if pa.properties['z'].minimum < mz:
                mz = get_real( pa.properties['z'].minimum, self.cl_precision )

            if pa.properties['x'].maximum > Mx:
                Mx = get_real( pa.properties['x'].maximum, self.cl_precision )

            if pa.properties['y'].maximum > My:
                My = get_real( pa.properties['y'].maximum, self.cl_precision )

            if pa.properties['z'].maximum > Mz:
                Mz = get_real( pa.properties['z'].maximum, self.cl_precision )

            if pa.properties['h'].maximum > Mh:
                Mh = get_real( pa.properties['h'].maximum, self.cl_precision )

        self.mx, self.my, self.mz = mx, my, mz
        self.Mx, self.My, self.Mz = Mx, My, Mz
        self.Mh = Mh

        self._set_cell_size()
        self._find_num_cells()

    def _set_cell_size(self):
        """Set the cell size for binning.

        Notes:
        ------

        If the cell size is being chosen based on the particle
        smoothing lengths, we choose a cell size slightly larger than
        k*h, where k is the maximum scale factor for the SPH kernel;
        currently (k + 1)*max(h) is used.

        If no constant bin size is provided, this derived value is
        used; otherwise the constant size is used as is.
        """
        if not self.const_cell_size:
            self.cell_size = get_real((self.kernel_scale_factor+1)*self.Mh,
                                      self.cl_precision)
        else:
            self.cell_size = self.const_cell_size

    def _find_num_cells(self):
        """Find the number of Cells in each coordinate direction.

        The number of cells is found from the simulation bounds and
        the cell size used for binning.
        """
        max_pnt = Point(self.Mx, self.My, self.Mz)
        max_cid = py_find_cell_id(max_pnt, self.cell_size)

        min_pnt = Point(self.mx, self.my, self.mz)
        min_cid = py_find_cell_id(min_pnt, self.cell_size)

        self.ncx = numpy.int32(max_cid.x - min_cid.x + 1)
        self.ncy = numpy.int32(max_cid.y - min_cid.y + 1)
        self.ncz = numpy.int32(max_cid.z - min_cid.z + 1)

        # cell index offsets of the minimum cell
        self.mcx = numpy.int32(min_cid.x)
        self.mcy = numpy.int32(min_cid.y)
        self.mcz = numpy.int32(min_cid.z)

        self.ncells = numpy.int32(self.ncx * self.ncy * self.ncz)

    def _setup_program(self):
        """Build the OpenCL program. Overridden by subclasses."""
        pass
class LinkedListManager(DomainManager):
    """ Domain manager using bins as the indexing scheme and a linked
    list as the neighbor locator scheme.

    Data Attributes:
    ----------------

    arrays : list
        The particle arrays handled by the manager

    head : dict
        Head arrays for each ParticleArray maintained.
        The dictionary is keyed on name of the ParticleArray,
        with the head array as value.

    Next : dict
        Next array for each ParticleArray maintained.
        The dictionary is keyed on name of the ParticleArray,
        with the next array as value.

    const_cell_size : REAL
        Optional constant cell size used for binning.

    cell_size : REAL
        Cell size used for binning.

    cl_precision : string
        OpenCL precision to use. This is taken from the ParticleArrays

    Mx, mx, My, my, Mz, mz -- REAL
        Global bounds for the binning

    ncx, ncy, ncz -- uint
        Number of cells in each coordinate direction

    ncells -- uint
        Total number of cells : (ncx * ncy * ncz)

    with_cl -- bool
        Flag to use OpenCL for the neighbor list generation.
    """
    def __init__(self, arrays, cell_size=None, context=None,
                 kernel_scale_factor = 2.0, with_cl=True):
        """ Construct a linked list manager.

        Parameters:
        ------------

        arrays -- list
            The ParticleArrays being managed.

        cell_size -- REAL
            The optional bin size to use

        kernel_scale_factor -- REAL
            the scale factor for the radius

        with_cl -- bool
            Explicitly choose OpenCL

        A LinkedListManager constructs and maintains a linked list for
        a list of particle arrays. The linked list data structure
        consists of two arrays per particle array:

        head : An integer array of size ncells, where ncells is the
        total number of cells in the domain. Each entry points to the
        index of a particle belonging to the cell. A negative index
        (-1) indicates an empty cell.

        next : An integer array of size num_particles. Each entry
        points to the next particle in the same cell. A negative index
        (-1) indicates no more particles.

        The bin size, if provided, is constant in each coordinate
        direction. The default choice for the bin size is twice the
        maximum smoothing length for all particles in the domain.
        """
        DomainManager.__init__(self, arrays, context, with_cl)

        # set the kernel scale factor
        self.kernel_scale_factor = kernel_scale_factor

        # set the cell size (cast to the arrays' CL precision)
        self.const_cell_size = cell_size
        if cell_size:
            self.const_cell_size = get_real(cell_size, self.cl_precision)

        # find global bounds (simulation box and ncells)
        self._find_bounds()

        # The linked list structures for the arrays, keyed on array name.
        self.Next = {}
        self.head = {}
        self.cellids = {}
        self.locks = {}
        self.indices = {}

        # per-particle cell indices in each coordinate direction
        self.ix = {}
        self.iy = {}
        self.iz = {}

        # device linked list structures
        self.dnext = {}
        self.dhead = {}
        self.dcellids = {}
        self.dlocks = {}
        self.dindices = {}

        self.dix = {}
        self.diy = {}
        self.diz = {}

        # dict for kernel launch parameters
        self.global_sizes = {}
        self.local_sizes = {}

        # initialize counter for the iterator
        self._current_cell = 0

        # initialize the linked list
        self._init_linked_list()

    #######################################################################
    # public interface
    #######################################################################
    def update(self):
        """ Update the linked list """
        # find the bounds for the manager
        self._find_bounds()

        # reset the data structures
        self._init_linked_list()

        # update the data structures
        if self.with_cl:
            self._cl_update()
        else:
            self._cy_update()

    def enqueue_copy(self):
        """ Copy the Buffer contents to the host.

        The buffers copied are

        cellids, head, next, dix, diy, diz

        No-op when OpenCL is not in use.
        """
        if self.with_cl:
            for pa in self.arrays:
                enqueue_copy(self.queue, dst=self.cellids[pa.name],
                             src=self.dcellids[pa.name])

                enqueue_copy(self.queue, dst=self.head[pa.name],
                             src=self.dhead[pa.name])

                enqueue_copy(self.queue, dst=self.Next[pa.name],
                             src=self.dnext[pa.name])

                enqueue_copy(self.queue, dst=self.ix[pa.name],
                             src=self.dix[pa.name])

                enqueue_copy(self.queue, dst=self.iy[pa.name],
                             src=self.diy[pa.name])

                enqueue_copy(self.queue, dst=self.iz[pa.name],
                             src=self.diz[pa.name])

    ###########################################################################
    # non-public interface
    ###########################################################################
    def _init_linked_list(self):
        """ Initialize the linked list dictionaries to store the
        particle neighbor information.

        Three arrays, namely, head, next and cellids are created per
        particle array. head/next entries start at -1 (empty/end).
        """
        ncells = self.ncells

        for i in range(self.narrays):
            pa = self.arrays[i]
            np = pa.get_number_of_particles()

            head = numpy.ones(ncells, numpy.int32) * numpy.int32(-1)
            next = numpy.ones(np, numpy.int32) * numpy.int32(-1)
            cellids = numpy.ones(np, numpy.uint32)
            locks = numpy.zeros(ncells, numpy.int32)
            indices = numpy.arange(np, dtype=numpy.uint32)

            ix = numpy.ones(np, numpy.uint32)
            iy = numpy.ones(np, numpy.uint32)
            iz = numpy.ones(np, numpy.uint32)

            self.head[pa.name] = head
            self.Next[pa.name] = next
            self.cellids[pa.name] = cellids
            self.locks[pa.name] = locks
            self.indices[pa.name] = indices

            self.ix[pa.name] = ix
            self.iy[pa.name] = iy
            self.iz[pa.name] = iz

        if self.with_cl:
            self._init_device_buffers()

    def _init_device_buffers(self):
        """ Initialize the device buffers """
        for i in range(self.narrays):
            pa = self.arrays[i]
            np = pa.get_number_of_particles()

            # initialize the kernel launch parameters
            # (one work-item per particle, local size 1)
            self.global_sizes[pa.name] = (np,)
            self.local_sizes[pa.name] = (1,)

            head = self.head[pa.name]
            next = self.Next[pa.name]
            cellids = self.cellids[pa.name]
            locks = self.locks[pa.name]
            indices = self.indices[pa.name]

            ix = self.ix[pa.name]
            iy = self.iy[pa.name]
            iz = self.iz[pa.name]

            dhead = cl.Buffer(self.context,
                              mf.READ_WRITE | mf.COPY_HOST_PTR, hostbuf=head)

            dnext = cl.Buffer(self.context,
                              mf.READ_WRITE | mf.COPY_HOST_PTR, hostbuf=next)

            dcellids = cl.Buffer(self.context,
                                 mf.READ_WRITE | mf.COPY_HOST_PTR,
                                 hostbuf=cellids)

            dlocks = cl.Buffer(self.context,
                               mf.READ_WRITE | mf.COPY_HOST_PTR,
                               hostbuf=locks)

            dindices = cl.Buffer(self.context,
                                 mf.READ_WRITE | mf.COPY_HOST_PTR,
                                 hostbuf=indices)

            dix = cl.Buffer(self.context,
                            mf.READ_WRITE | mf.COPY_HOST_PTR,
                            hostbuf=ix)

            diy = cl.Buffer(self.context,
                            mf.READ_WRITE | mf.COPY_HOST_PTR,
                            hostbuf=iy)

            diz = cl.Buffer(self.context,
                            mf.READ_WRITE | mf.COPY_HOST_PTR,
                            hostbuf=iz)

            self.dhead[pa.name] = dhead
            self.dnext[pa.name] = dnext
            self.dcellids[pa.name] = dcellids
            self.dlocks[pa.name] = dlocks
            self.dindices[pa.name] = dindices

            self.dix[pa.name] = dix
            self.diy[pa.name] = diy
            self.diz[pa.name] = diz

    def _cy_update(self):
        """ Construct the linked lists for the particle arrays using Cython"""
        ncx, ncy, ncz = self.ncx, self.ncy, self.ncz
        mx, my, mz = self.mx, self.my, self.mz

        cell_size = self.cell_size
        # note: the line above is immediately overridden by the
        # precision-cast value below
        cell_size = get_real(self.cell_size, self.cl_precision)

        for i in range(self.narrays):
            pa = self.arrays[i]
            np = pa.get_number_of_particles()

            x, y, z = pa.get('x','y','z')
            if self.cl_precision == 'single':
                x = x.astype(numpy.float32)
                y = y.astype(numpy.float32)
                z = z.astype(numpy.float32)

            cbin( x, y, z,
                  self.cellids[pa.name],
                  self.ix[pa.name],
                  self.iy[pa.name],
                  self.iz[pa.name],
                  self.head[pa.name],
                  self.Next[pa.name],
                  mx, my, mz,
                  numpy.int32(ncx), numpy.int32(ncy), numpy.int32(ncz),
                  cell_size, numpy.int32(np),
                  self.mcx, self.mcy, self.mcz
                  )

    def _cl_update(self):
        """ Construct the linked lists for the particle arrays using OpenCL"""
        for i in range(self.narrays):
            pa = self.arrays[i]

            x = pa.get_cl_buffer('x')
            y = pa.get_cl_buffer('y')
            z = pa.get_cl_buffer('z')

            # Bin particles: compute a flattened cell id per particle
            self.prog.bin( self.queue,
                           self.global_sizes[pa.name],
                           self.local_sizes[pa.name],
                           x, y, z,
                           self.dcellids[pa.name],
                           self.dix[pa.name],
                           self.diy[pa.name],
                           self.diz[pa.name],
                           self.mx,
                           self.my,
                           self.mz,
                           self.ncx,
                           self.ncy,
                           self.ncz,
                           self.cell_size,
                           self.mcx,
                           self.mcy,
                           self.mcz
                           ).wait()

            # Build head/next from the cell ids
            self.prog.construct_neighbor_list(self.queue,
                                              self.global_sizes[pa.name],
                                              self.local_sizes[pa.name],
                                              self.dcellids[pa.name],
                                              self.dhead[pa.name],
                                              self.dnext[pa.name],
                                              self.dlocks[pa.name]
                                              ).wait()

    def _setup_program(self):
        """ Read the OpenCL kernel source file and build the program """
        src_file = get_pysph_root() + '/base/linked_list.cl'
        src = cl_read(src_file, precision=self.cl_precision)
        self.prog = cl.Program(self.context, src).build()

    #######################################################################
    # object interface
    #######################################################################
    def next(self):
        """Iterator interface to get cell neighbors.

        Usage:
        ------

        for cell_nbrs in LinkedListManager():
            ...

        where, the length of the iterator is `ncells` and at each call,
        the `forward` neighbors for the cell are returned.

        The `forward` cells for a given cell with index cid are
        neighboring cells with an index cid' >= cid
        """
        if self._current_cell == self.ncells:
            # reset the counter so the manager can be iterated again
            self._current_cell = 0
            raise StopIteration
        else:
            # we are getting neighbors for the current cell
            cid = self._current_cell

            # get the cell indices for the current cell to search for
            ncx = self.ncx
            ncy = self.ncy
            ncz = self.ncz

            ix, iy, iz = unflatten(cid, ncx, ncy)

            # determine the range of search (27-cell neighborhood,
            # clipped at the domain boundaries)
            imin = max(ix -1, 0)
            jmin = max(iy -1, 0)
            kmin = max(iz -1, 0)

            imax = min(ix + 2, ncx)
            jmax = min(iy + 2, ncy)
            kmax = min(iz + 2, ncz)

            # raise the counter for the current cell
            self._current_cell += 1

            # keep only the forward neighbors (flattened id >= cid)
            return [i+j*ncx+k*ncx*ncy \
                    for i in range(imin, imax) \
                    for j in range(jmin, jmax) \
                    for k in range(kmin, kmax) \
                    if i+j*ncx+k*ncx*ncy >= cid]

    ##########################################################################
    # DEPRECATED
    ##########################################################################
    def reset_cy_data(self):
        # host-side reset of the head/next arrays to the empty state
        for pa in self.arrays:
            head = self.head[pa.name]
            next = self.Next[pa.name]

            head[:] = -1
            next[:] = -1

    def reset_cl_data(self):
        # device-side reset: head/next to -1, locks to 0
        for pa in self.arrays:
            dhead = self.dhead[pa.name]
            dnext = self.dnext[pa.name]
            dlocks = self.dlocks[pa.name]

            global_sizes = (int(self.ncells),)

            val = numpy.int32(-1)
            self.prog.reset(self.queue, global_sizes, None, dhead, val).wait()

            val = numpy.int32(0)
            self.prog.reset(self.queue, global_sizes, None, dlocks, val).wait()

            global_sizes = self.global_sizes[pa.name]
            val = numpy.int32(-1)
            self.prog.reset(self.queue, global_sizes, None, dnext, val).wait()

    def reset_data(self):
        """ Initialize the data structures.

        Head is initialized to -1
        Next is initialized to -1
        locks is initialized to 0
        """
        if self.with_cl:
            self.reset_cl_data()
        else:
            self.reset_cy_data()
class RadixSortManager(DomainManager):
    """Spatial indexing scheme based on the radix sort.

    The radix sort can be used to determine neighbor information in
    the following way. Consider the particle distribution in an
    idealized one dimensional cell structure as::

        _____________
        |   |   |   |
        | 2 |0,1| 3 |
        |___|___|___|

    that is, particles with indices 0 and 1 are in cell 1, particle 2
    is in cell 0 and 3 in cell 3.

    We construct two arrays::

        cellids (size=np) : [1,1,0,2] and
        indices (size=np) : [0,1,2,3]

    and sort the indices based on the keys. After the sorting routine,
    the arrays are::

        cellids (size=np) : [0,1,1,2]
        indices (size=np) : [2,0,1,3]

    Now we can compute an array cell_counts (size=ncells+1) from the
    sorted cellids as::

        cellc = [0, 1, 3, 4],

    which can be computed by launching one thread per particle. If the
    sorted cellid to the left is different from this cellid, then this
    particle is at a cell boundary and the index of that particle in
    the sorted cellids is placed in the `cellc` array at that
    location. Of course, there will be as many cell boundaries as
    there are cells. The boundary conditions will have to be handled
    separately.

    Now using this we can determine the particles that belong to a
    particular cell like so::

        particles in cell0 = indices[ cellids[cellc[0]] : cellids[cellc[1]] ]
    """
    def __init__(self, arrays, cell_size=None, context=None,
                 kernel_scale_factor=2.0, with_cl=True, device='CPU'):
        """ Construct a RadixSort manager.

        Parameters:
        ------------

        arrays -- list
            The ParticleArrays being managed.

        cell_size -- REAL
            The optional bin size to use

        kernel_scale_factor -- REAL
            the scale factor for the radius

        with_cl -- bool
            Explicitly choose OpenCL

        The RadixSort manager constructs and maintains the following
        attributes for each array being indexed:

        (i)   cellids (size=np, uint32) : Flattened cell indices for the
              particles.
        (ii)  indices (size=np, uint32) : Particle indices
        (iii) cell_counts (size=ncells+1, uint32) : Cell count array

        The bin size, if provided is constant in each coordinate
        direction. The default choice for the bin size is twice the
        maximum smoothing length for all particles in the domain.
        """
        DomainManager.__init__(self, arrays, context, with_cl, device)

        # set the kernel scale factor
        self.kernel_scale_factor = kernel_scale_factor

        # set the cell size; a user supplied size is converted to the
        # OpenCL floating point precision in use
        self.const_cell_size = cell_size
        if cell_size is not None:
            self.const_cell_size = get_real(cell_size, self.cl_precision)

        # find global bounds (simulation box and ncells)
        self._find_bounds()

        # Host arrays stored for the RadixSortManager, keyed on array name
        self.cellids = {}
        self.indices = {}
        self.cell_counts = {}

        # setup the RadixSort objects, one per particle array
        self.rsort = {}
        self._setup_radix_sort()

        # Corresponding device arrays, keyed on array name
        self.dcellids = {}
        self.dindices = {}
        self.dcell_counts = {}

        # dicts for kernel launch parameters
        self.global_sizes = {}
        self.local_sizes = {}

        # initialize counter for the iterator
        self._current_cell = 0

        # initialize the host and device buffers
        self._init_buffers()

    #######################################################################
    # public interface
    #######################################################################
    def update(self):
        """ Update the indexing structures.

        The global bounds are recomputed, the buffers re-allocated and
        the binning/sorting performed either with OpenCL or in pure
        Python.
        """
        # find the bounds for the manager
        self._find_bounds()

        # reset the data structures
        self._init_buffers()

        # update the data structures
        if self.with_cl:
            self._cl_update()
        else:
            self._py_update()

    def enqueue_copy(self):
        """ Copy the Buffer contents to the host.

        The cellids, indices and cell counts buffers are copied to the
        corresponding host arrays.  This is a no-op when OpenCL is not
        in use.
        """
        if self.with_cl:
            for pa in self.arrays:
                # use clu.enqueue_copy throughout for consistency with
                # _cl_update; the bare `enqueue_copy` name used here
                # previously is not among this module's visible imports
                clu.enqueue_copy(self.queue, dst=self.cellids[pa.name],
                                 src=self.dcellids[pa.name])

                clu.enqueue_copy(self.queue, dst=self.indices[pa.name],
                                 src=self.dindices[pa.name])

                clu.enqueue_copy(self.queue, dst=self.cell_counts[pa.name],
                                 src=self.dcell_counts[pa.name])

    ###########################################################################
    # non-public interface
    ###########################################################################
    def _init_buffers(self):
        """Allocate host and device buffers for the RadixSortManager.

        The arrays needed for the manager are:

        (a) cellids of size np which indicates which cell the particle
            belongs to.

        (b) indices of size np which is initially a linear index range
            for the particles. After sorting, this array is used to
            determine particles within a cell.

        (c) cell_counts of size ncells + 1 which is used to determine
            the start and end index for the particles within a cell.
        """
        # at this point the number of cells is known
        ncells = self.ncells

        for i in range(self.narrays):
            pa = self.arrays[i]
            np = pa.get_number_of_particles()

            # cellids and indices are of length np and dtype uint32
            cellids = numpy.ones(np, numpy.uint32)
            indices = numpy.arange(np, dtype=numpy.uint32)

            # cell_counts is of length ncells + 1
            cellc = numpy.ones(ncells + 1, numpy.uint32)

            # store these in the dictionary for this particle array
            self.cellids[ pa.name ] = cellids
            self.indices[ pa.name ] = indices
            self.cell_counts[ pa.name ] = cellc

        if self.with_cl:
            self._init_device_buffers()

    def _init_device_buffers(self):
        """Initialize the device buffers.

        The arrays initialized here are the cell counts, cellids and
        indices. The RadixSort object handles the keys and values.
        """
        narrays = self.narrays
        for i in range(narrays):
            pa = self.arrays[i]

            cellids = self.cellids[pa.name]
            indices = self.indices[pa.name]
            cellc = self.cell_counts[pa.name]

            # Initialize the buffers from the host arrays
            dcellids = cl.Buffer(self.context, mf.READ_WRITE|mf.COPY_HOST_PTR,
                                 hostbuf=cellids)

            dindices = cl.Buffer(self.context, mf.READ_WRITE|mf.COPY_HOST_PTR,
                                 hostbuf=indices)

            dcellc = cl.Buffer(self.context, mf.READ_WRITE|mf.COPY_HOST_PTR,
                               hostbuf=cellc)

            self.dcellids[pa.name] = dcellids
            self.dindices[pa.name] = dindices
            self.dcell_counts[ pa.name ] = dcellc

    def _setup_radix_sort(self):
        """Setup the RadixSort objects to be used.

        Currently, only the AMDRadixSort is available which works on
        both the CPU and the GPU. The NvidiaRadixSort works only on
        Nvidia GPU's.
        """
        narrays = self.narrays
        rsort = self.rsort

        if not self.with_cl:
            for pa in self.arrays:
                rsort[pa.name] = AMDRadixSort()
        else:
            ctx = self.context
            for pa in self.arrays:
                if clu.iscpucontext(ctx):
                    rsort[ pa.name ] = AMDRadixSort()
                elif clu.isgpucontext(ctx):
                    #rsort[ pa.name ] = AMDRadixSort()
                    rsort[ pa.name ] = NvidiaRadixSort()

    def _cl_update(self):
        """Update the data structures with OpenCL.

        The following three steps are performed in order:

        (a) The particles are binned using a standard algorithm like
            the one for linked lists.

        (b) Sort the resulting cellids (keys) and indices (values)
            using the RadixSort objects.

        (c) Compute the cell counts by examining the sorted cellids.
        """
        # context and queue
        ctx = self.context
        q = self.queue

        # get the cell limits
        ncx, ncy, ncz = self.ncx, self.ncy, self.ncz
        mcx, mcy, mcz = self.mcx, self.mcy, self.mcz

        narrays = self.narrays
        for i in range(narrays):
            pa = self.arrays[i]
            np = pa.get_number_of_particles()

            # launch parameters for this array: one work-item per particle
            global_sizes = (np, 1, 1)
            local_sizes = (1, 1, 1)

            x = pa.get_cl_buffer("x")
            y = pa.get_cl_buffer("y")
            z = pa.get_cl_buffer("z")

            # host and device arrays for this particle array
            cellids = self.cellids[pa.name]
            indices = self.indices[pa.name]
            cellc = self.cell_counts[pa.name]

            dcellids = self.dcellids[pa.name]
            dindices = self.dindices[pa.name]
            dcell_counts = self.dcell_counts[pa.name]

            # bin the particles to get device cellids
            self.prog.bin( q, global_sizes, local_sizes,
                           x, y, z, dcellids, self.cell_size,
                           ncx, ncy, ncz, mcx, mcy, mcz ).wait()

            # read the cellids into the host array
            clu.enqueue_copy(q, src=dcellids, dst=cellids)

            # initialize the RadixSort with keys and values
            keys = cellids
            values = indices

            rsort = self.rsort[ pa.name ]
            rsort.initialize(keys, values, self.context)

            # sort the keys (cellids) and values (indices)
            rsort.sort()

            sortedcellids = rsort.dkeys

            self.prog.compute_cell_counts(q, global_sizes, local_sizes,
                                          sortedcellids, dcell_counts,
                                          numpy.uint32(self.ncells),
                                          numpy.uint32(np)).wait()

            # read the result back to host
            # THIS MAY NEED TO BE DONE OR WE COULD SIMPLY LET IT RESIDE
            # ON THE DEVICE.
            clu.enqueue_copy(q, src=dcell_counts, dst=self.cell_counts[pa.name])

    def _py_update(self):
        """Update the data structures using pure Python."""
        cellsize = self.cell_size
        cellsize1 = 1.0/cellsize

        narrays = self.narrays
        for i in range(narrays):
            pa = self.arrays[i]
            np = pa.get_number_of_particles()

            # bin the particles: flatten the (ix, iy, iz) cell index,
            # shifted by the minimum cell (mcx, mcy, mcz)
            cellids = self.cellids[pa.name]
            x, y, z = pa.get("x", "y", "z")
            for j in range(np):
                _ix = int(numpy.floor( x[j] * cellsize1 ))
                _iy = int(numpy.floor( y[j] * cellsize1 ))
                _iz = int(numpy.floor( z[j] * cellsize1 ))

                cellids[j] = numpy.uint32( (_iz - self.mcz)*self.ncx*self.ncy + \
                                           (_iy - self.mcy)*self.ncx + \
                                           (_ix - self.mcx) )

            # sort the cellids and indices
            keys = cellids
            values = self.indices[pa.name]

            rsort = self.rsort[pa.name]
            rsort._sort_cpu(keys, values)

            # compute the cell_count array from the sorted cellids:
            # walk the particles; each change in cellid marks a cell
            # boundary whose position is recorded in cellc
            cellc = self.cell_counts[pa.name]
            cellids = keys

            # NOTE(review): for np == 1 only the j == 0 branch runs, so
            # the tail of cellc is never filled -- confirm single
            # particle arrays are not expected here.
            for j in range(np):
                cellid = cellids[j]
                if j == 0:
                    # all cells up to the first occupied one start at 0
                    for k in range(cellid + 1):
                        cellc[k] = 0
                elif j == (np - 1):
                    # all cells past the last occupied one end at np
                    for k in range(cellid+1, self.ncells + 1):
                        cellc[k] = np

                    cellidm = cellids[j-1]
                    for k in range(cellid - cellidm):
                        cellc[cellid - k] = j
                else:
                    # interior boundary: fill counts for any skipped
                    # (empty) cells between consecutive cellids
                    cellidm = cellids[j-1]
                    for k in range(cellid - cellidm):
                        cellc[cellid - k] = j

    def _setup_program(self):
        """ Read the OpenCL kernel source file and build the program """
        src_file = get_pysph_root() + '/base/radix_sort.cl'
        src = cl_read(src_file, precision=self.cl_precision)
        self.prog = cl.Program(self.context, src).build()
| Python |
# OpenCL conditional imports
import pysph.solver.cl_utils as clu
if clu.HAS_CL:
import pyopencl as cl
mf = cl.mem_flags
import numpy as np
class Scan(object):
    """Exclusive prefix-sum (scan) helper built on OpenCL kernels.

    Follows the structure of the OpenCL SDK "Scan_b" sample: large
    arrays are scanned in three passes -- a local scan of fixed-size
    blocks, a scan of the per-block sums, and a uniform update that
    adds the block offsets back.
    """
    def __init__(self, GPUContext,
                 CommandQueue,
                 numElements):
        """Create the scan object.

        Parameters
        ----------
        GPUContext : pyopencl.Context
            Context used to allocate buffers and build the program.
        CommandQueue : pyopencl.CommandQueue
            Queue on which the kernels are enqueued.
        numElements : int
            Number of elements that will be scanned.
        """
        # Constants (mirroring the SDK sample)
        MAX_WORKGROUP_INCLUSIVE_SCAN_SIZE = 1024
        MAX_LOCAL_GROUP_SIZE = 256
        self.WORKGROUP_SIZE = 256
        self.MAX_BATCH_ELEMENTS = 64 * 1048576  # 64 * numElements
        self.MIN_SHORT_ARRAY_SIZE = 4
        self.MAX_SHORT_ARRAY_SIZE = 4 * self.WORKGROUP_SIZE
        self.MIN_LARGE_ARRAY_SIZE = 8 * self.WORKGROUP_SIZE
        self.MAX_LARGE_ARRAY_SIZE = 4 * self.WORKGROUP_SIZE * self.WORKGROUP_SIZE

        # size of an unsigned int in bytes
        self.size_uint = size_uint = np.uint32(0).nbytes

        # OpenCL elements
        self.cxGPUContext = GPUContext
        self.cqCommandQueue = CommandQueue
        self.mNumElements = numElements

        mf = cl.mem_flags
        # scratch buffer holding one partial sum per scanned block.
        # NOTE: np.int (a deprecated alias of the builtin int, removed
        # in NumPy 1.24) has been replaced by int throughout -- the
        # behavior is identical.
        if (numElements > MAX_WORKGROUP_INCLUSIVE_SCAN_SIZE):
            self.d_Buffer = cl.Buffer(
                self.cxGPUContext, mf.READ_WRITE,
                int(numElements/MAX_WORKGROUP_INCLUSIVE_SCAN_SIZE * size_uint))

        # Program
        src_file = clu.get_pysph_root() + '/base/Scan_b.cl'
        src = open(src_file).read()
        cpProgram = cl.Program(self.cxGPUContext, src).build()

        # Kernels
        self.ckScanExclusiveLocal1 = cpProgram.scanExclusiveLocal1
        self.ckScanExclusiveLocal2 = cpProgram.scanExclusiveLocal2
        self.ckUniformUpdate = cpProgram.uniformUpdate

    def scanExclusiveLarge(self, d_Dst, d_Src, batchSize, arrayLength):
        """Exclusive scan of `batchSize` arrays of `arrayLength` each.

        Performs the three-pass large-array scan: local scan of
        4*WORKGROUP_SIZE blocks, exclusive scan of the per-block sums,
        and a uniform update of the destination buffer.
        """
        # Pass I: local exclusive scan of each 4*WORKGROUP_SIZE block
        WORKGROUP_SIZE = self.WORKGROUP_SIZE

        size = np.uint32(4 * WORKGROUP_SIZE)
        n = (batchSize * arrayLength) / (4 * WORKGROUP_SIZE)

        localWorkSize = (int(WORKGROUP_SIZE),)
        globalWorkSize = (int((n * size) / 4),)

        # local (shared) memory scratch for the work group
        l_data1 = cl.LocalMemory(int(2 * WORKGROUP_SIZE * self.size_uint))

        self.ckScanExclusiveLocal1(self.cqCommandQueue, globalWorkSize, localWorkSize,
                                   d_Dst,
                                   d_Src,
                                   l_data1,
                                   size).wait()

        # Pass II: exclusive scan of the per-block sums into d_Buffer
        size = np.uint32(arrayLength / (4 * WORKGROUP_SIZE))
        n = batchSize

        elements = np.uint32(n * size)
        globalWorkSize = (self.iSnapUp(elements, WORKGROUP_SIZE),)

        l_data2 = cl.LocalMemory(int(2 * WORKGROUP_SIZE * self.size_uint))

        self.ckScanExclusiveLocal2(self.cqCommandQueue, globalWorkSize, localWorkSize,
                                   self.d_Buffer,
                                   d_Dst,
                                   d_Src,
                                   l_data2,
                                   elements,
                                   size).wait()

        # Pass III: add the scanned block sums back into the result
        n = (batchSize * arrayLength) / (4 * WORKGROUP_SIZE)
        localWorkSize = (int(WORKGROUP_SIZE),)
        globalWorkSize = (int(n * WORKGROUP_SIZE),)

        self.ckUniformUpdate(self.cqCommandQueue, globalWorkSize, localWorkSize,
                             d_Dst,
                             self.d_Buffer).wait()

    def iSnapUp(self, dividend, divisor):
        """Round `dividend` up to the nearest multiple of `divisor`."""
        rem = dividend % divisor
        if (rem == 0):
            return int(dividend)
        else:
            return int(dividend - rem + divisor)
| Python |
#! python
'''
Module to collect and generate source files from template files
The template files have very similar syntax to php files.
* All text in input is copied straight to output except that within
`<?py` and `?>` tags.
* Text within `<?py=` and `?>` tags is evaluated and the result is written
into the output file as a string
* Text within `<?py` and `?>` tags is executed with a file-like object `out`
defined which can be written into using `out.write(<string>)`
* Note however that unlike php each code tag cannot extend across different
tags. For example you can ``NOT`` write a loop like:
..
<?py for i in range(5): ?>
In loop with i=<?py= i ?> .
<?py # End of loop ?>
* The imports and globals defined are persisted through all code sections
When used to generate source files as a main program:
The template files must have an extension '.src'.
The generated files have the name same as the src file but with the '.src'
extension removed and the last underscore '_' replaced with a dot '.'
Example: `carray_pyx.src` is generated into `carray.pyx`
'''
import os
import sys
import re
from StringIO import StringIO
def is_modified_later(filename1, filename2):
    '''Return `True` if `filename1` was modified more recently than `filename2`.'''
    t1 = os.stat(filename1).st_mtime
    t2 = os.stat(filename2).st_mtime
    return t1 > t2
class FileGenerator(object):
    '''Generate a source file from a template.

    Template text is copied verbatim except for code tags:
    `<?py= expr ?>` is evaluated and the result substituted as a
    string, while `<?py code ?>` is executed with a file-like `out`
    object whose written contents replace the tag.  Names defined in
    one tag persist into later tags through a shared namespace.
    '''
    # matches any code tag (both the expression and statement forms)
    py_pattern = re.compile(r'''(?s)\<\?py(?P<code>.*?)\?\>''')
    # statement tag only: `<?py ... ?>` not followed by `=`
    code_pattern = re.compile(r'''(?s)\<\?py(?!=)(?P<code>.*?)\?\>''')
    # expression tag: `<?py= ... ?>`
    expr_pattern = re.compile(r'''(?s)\<\?py=(?P<expr>.*?)\?\>''')

    def generate_file_if_modified(self, infilename, outfilename, check=True):
        '''generate source if template is modified later than the outfile

        If `check` is True (default) then source is generated only if the
        template has been modified later than the source file; if False
        the source is generated unconditionally.'''
        # BUGFIX: `check` was previously ignored and the timestamp
        # comparison performed unconditionally, contradicting the
        # documented check=False behavior.
        if not check or is_modified_later(infilename, outfilename):
            self.generate_file(infilename, outfilename)

    def generate_file(self, infile=sys.stdin, outfile=sys.stdout):
        '''method to generate source file from a template file

        Either argument may be a filename (opened and closed here) or an
        open file-like object (left open for the caller).'''
        inf = infile
        outf = outfile
        if isinstance(infile, type('')):
            inf = open(infile, 'r')
        if isinstance(outfile, type('')):
            outf = open(outfile, 'w')
        text = inf.read()
        outtext = self.generate_output(text)
        outf.write(outtext)
        if isinstance(infile, type('')):
            inf.close()
        if isinstance(outfile, type('')):
            outf.close()

    def generate_output(self, intext):
        '''generate output source as a string from given input template'''
        # fresh namespace shared by all the tags in this template
        self.dict = {}
        return re.sub(self.py_pattern, self.sub_func, intext)

    def sub_func(self, matchobj):
        '''dispatch a matched code tag to evaluation or execution'''
        string = matchobj.group(0)
        # BUGFIX: the closing delimiter `?>` is two characters; the old
        # [-3] slices silently dropped the last character of the code
        # whenever there was no space before `?>`.  strip() handles the
        # optional surrounding whitespace.
        if string[4] == '=':
            # `<?py= expr ?>` : evaluate and substitute the result
            return str(self.get_expr_result(string[5:-2].strip()))
        else:
            # `<?py code ?>` : execute; substitute what was written to `out`
            return self.get_exec_output(string[4:-2].strip())

    def get_exec_output(self, code_str):
        '''get the output written to `out` from execution of a code string'''
        out = StringIO()
        self.dict['out'] = out
        # function form of exec() is valid on both Python 2 and 3
        # (the original used the Python-2-only statement form)
        exec(code_str, self.dict)
        ret = out.getvalue()
        out.close()
        return ret

    def get_expr_result(self, expr_str):
        '''evaluate an expression in the shared namespace and return it'''
        ret = eval(expr_str, self.dict)
        return ret
def get_src_files(dirname):
    '''Return all files in `dirname` that have a `.src` extension.'''
    return [os.path.join(dirname, name)
            for name in os.listdir(dirname)
            if name.endswith('.src')]
def generate_files(src_files, if_modified=True):
    '''generates source files from the template files with extension `.src`

    If `if_modified` is True (default), the source file will be created only
    if the template has been modified later than the source
    '''
    generator = FileGenerator()
    for filename in src_files:
        # strip the '.src' extension and turn the last underscore into
        # a dot: e.g. 'carray_pyx.src' -> 'carray.pyx'
        outfile = '.'.join(filename[:-4].rsplit('_',1))
        # prefix the status line with 'not' when the file is up to date
        # NOTE(review): is_modified_later raises OSError when `outfile`
        # does not exist yet -- confirm a previous output always exists.
        if if_modified and not is_modified_later(filename, outfile):
            print 'not',
        print 'generating file %s from %s' %(outfile, filename)
        # NOTE(review): `if_modified` is forwarded as the `check`
        # argument, so the timestamp comparison above is repeated inside
        # generate_file_if_modified -- confirm the intended semantics.
        generator.generate_file_if_modified(filename, outfile, if_modified)
def main(paths=None):
    '''generates source files using template files

    `paths` is a list of `.src` template files to convert.
    if `paths` is `None` all src files in this file's directory are converted
    if `paths` is an empty list all src files in current directory are converted
    '''
    if paths is None:
        targets = get_src_files(os.path.dirname(__file__))
    elif paths:
        targets = paths
    else:
        targets = get_src_files(os.path.curdir)
    generate_files(targets)
if __name__ == '__main__':
    import sys
    # command-line entry point: show usage when asked for help,
    # otherwise convert the template files named on the command line
    # (with no arguments, every `.src` file is converted -- see main()).
    if '--help' in sys.argv or '-h' in sys.argv:
        print 'usage:'
        print ' generator.py [filenames]'
        print
        print (' Convert template files with extension `.src` into '
               'source files')
        print (' If filenames is omitted all `.src` files in current '
               'directory will be converted')
    else:
        main(sys.argv[1:])
| Python |
import nnps_util as util
import pysph.solver.cl_utils as clu
import numpy
# PySPH imports
from carray import LongArray
#CHANGE
class OpenCLNeighborLocatorType:
    """Enumeration of the available OpenCL neighbor locator types."""
    AllPairNeighborLocator = 0
    LinkedListSPHNeighborLocator = 1
    RadixSortNeighborLocator = 2
class OpenCLNeighborLocator(object):
    """Base class for the OpenCL neighbor locators."""
    pass
class LinkedListSPHNeighborLocator(OpenCLNeighborLocator):
    """Neighbor locator based on a linked-list domain manager."""

    def __init__(self, manager, source, dest, scale_fac=2.0, cache=False):
        """ Create a neighbor locator between a ParticleArray pair.

        A neighbor locator interfaces with a domain manager which
        provides an indexing scheme for the particles. The locator
        knows how to interpret the information generated after the
        domain manager's `update` function has been called.

        For the locators based on linked lists as the domain manager,
        the head and next arrays are used to determine the neighbors.

        Note:
        -----

        Cython functions to retrieve nearest neighbors given a
        destination particle index is only used when OpenCL support is
        not available.

        When OpenCL is available, the preferred approach is to
        generate the neighbor loop code and kernel arguments and
        inject this into the CL template files (done by CLCalc)

        Parameters:
        -----------

        manager : DomainManager
            The domain manager to use for locating neighbors

        source, dest : ParticleArray
            pair for which neighbors are sought.

        scale_fac : REAL
            Radius scale factor for non OpenCL runs.

        cache : bool
            Flag to indicate if neighbors are to be cached.
        """
        self.manager = manager
        self.source = source
        self.dest = dest

        self.scale_fac = scale_fac
        self.with_cl = manager.with_cl
        self.cache = cache

        # Initialize the cache if using with Cython
        self.particle_cache = []
        if self.cache:
            self._initialize_cache()

    #######################################################################
    # public interface
    #######################################################################
    def get_nearest_particles(self, i, output_array, exclude_index=-1):
        """ Return nearest particles from source array to the dest point.

        The search radius is the scale factor times the particle's h

        Parameters:
        -----------

        i : int
            The destination index

        output_array : (in/out) LongArray
            Neighbor indices are stored in this array.

        exclude_index : int
            Optional index to exclude from the neighbor list
            NOTIMPLEMENTED!
        """
        if self.cache:
            # BUGFIX: this previously returned self.neighbor_cache[i],
            # but the cache is stored as `particle_cache` (see
            # _initialize_cache) and `neighbor_cache` is never assigned,
            # so the cached path raised AttributeError.
            return self.particle_cache[i]
        else:
            self._get_nearest_particles_nocahe(i, output_array)

    ##########################################################################
    # non-public interface
    ##########################################################################
    def _update(self):
        """ Update the bin structure and compute cache contents.

        Caching is only done if explicitly requested and should be
        avoided for large problems to reduce the memory footprint.
        """
        # update the domain manager
        self.manager.update()

        # set the cache if required
        if self.cache:
            self._initialize_cache()
            # NOTE(review): `_udpdate_cache` is misspelled but kept for
            # compatibility with existing callers.
            self._udpdate_cache()

    def _get_nearest_particles_nocahe(self, i, output_array, exclude_index=-1):
        """ Use the linked list to get nearest neighbors.

        The functions defined in `linked_list_functions.pyx` are used to
        find the nearest neighbors.

        Parameters:
        -----------

        i : (in) int
            The destination particle index

        output_array : (in/out) LongArray
            Neighbor indices are stored in this array.

        exclude_index : int
            Optional index to exclude from the neighbor list
            NOTIMPLEMENTED!
        """
        manager = self.manager
        src = self.source
        dst = self.dest

        # Enqueue a copy if the binning is done with OpenCL
        manager.enqueue_copy()

        # get the bin structure parameters
        ncx = manager.ncx
        ncy = manager.ncy
        ncells = manager.ncells

        # linked list for the source
        head = manager.head[src.name]
        next = manager.Next[src.name]

        # cellid and unflattened cell indices for the destination
        cellid = manager.cellids[dst.name][i]
        ix = manager.ix[dst.name][i]
        iy = manager.iy[dst.name][i]
        iz = manager.iz[dst.name][i]

        # get all neighbors from the 27 neighboring cells
        nbrs = util.ll_get_neighbors(cellid, ix, iy, iz,
                                     ncx, ncy, ncells, head, next)

        x = src.x.astype(numpy.float32)
        y = src.y.astype(numpy.float32)
        z = src.z.astype(numpy.float32)

        xi = numpy.float32( dst.x[i] )
        yi = numpy.float32( dst.y[i] )
        zi = numpy.float32( dst.z[i] )

        h = dst.h.astype(numpy.float32)
        radius = self.scale_fac * h[i]

        # filter the neighbors to within a cutoff radius
        nbrs = util.filter_neighbors(xi, yi, zi, radius, x, y, z, nbrs)

        output_array.resize( len(nbrs) )
        output_array.set_data( nbrs )

    def _initialize_cache(self):
        """ Iniitialize the particle neighbor cache contents.

        The particle cache is one LongArray for each destination particle.
        """
        np = self.dest.get_number_of_particles()
        self.particle_cache = [ LongArray() for i in range(np) ]

    def _udpdate_cache(self):
        """ Compute the contents of the cache """
        np = self.dest.get_number_of_particles()
        for i in range(np):
            nbrs = self.particle_cache[i]
            self._get_nearest_particles_nocahe(i, nbrs)

    def neighbor_loop_code_start(self):
        """ Return a string for the start of the neighbor loop code """
        return """
            // int idx = cix[dest_id];
            // int idy = ciy[dest_id];
            // int idz = ciz[dest_id];

            int idx = cix[particle_id];
            int idy = ciy[particle_id];
            int idz = ciz[particle_id];

            REAL tmp = ncx*ncy;
            int src_id, cid;

            for (int ix = idx-1; ix <= idx+1; ++ix )
            {
                for (int iy = idy-1; iy <= idy+1; ++iy)
                {
                    for (int iz = idz-1; iz <= idz+1; ++iz)
                    {
                        if ( (ix >=0) && (iy >=0) && (iz >= 0) )
                        {
                            cid = (ix) + (iy * ncx) + (iz * tmp);

                            if ( cid < ncells )
                            {
                                src_id = head[ cid ];

                                while ( src_id != -1 )
        """

    def neighbor_loop_code_end(self):
        """ Return a string for the start of the neighbor loop code """
        return """
                            } // if cid < ncells
                        } // if ix >= 0
                    } // for iz
                } // for iy
            } // for ix
        """

    def neighbor_loop_code_break(self):
        """ Return the statement advancing to the next list entry """
        return "src_id = next[ src_id ]; "

    def get_kernel_args(self):
        """ Add the kernel arguments for the OpenCL template """
        dst = self.dest
        src = self.source

        cellids = self.manager.dcellids[dst.name]
        cix = self.manager.dix[dst.name]
        ciy = self.manager.diy[dst.name]
        ciz = self.manager.diz[dst.name]

        #CHANGE
        head = self.manager.dhead[src.name]
        next = self.manager.dnext[src.name]
        indices = self.manager.dindices[dst.name]

        return {'int const ncx': self.manager.ncx,
                'int const ncy': self.manager.ncy,
                'int const ncells': self.manager.ncells,
                '__global uint* cellids': cellids,
                '__global uint* cix': cix,
                '__global uint* ciy': ciy,
                '__global uint* ciz': ciz,
                '__global int* head': head,
                '__global int* next': next,
                '__global uint* indices': indices
                }
class AllPairNeighborLocator(OpenCLNeighborLocator):
    # Brute-force locator: every source particle is treated as a
    # candidate neighbor of every destination particle.
    def __init__(self, source, dest, scale_fac=2.0, cache=False):
        """ Create a neighbor locator between a ParticleArray pair.

        A neighbor locator interfaces with a domain manager which
        provides an indexing scheme for the particles. The locator
        knows how to interpret the information generated after the
        domain manager's `update` function has been called.

        For the locators based on linked lists as the domain manager,
        the head and next arrays are used to determine the neighbors.

        Note:
        -----

        Cython functions to retrieve nearest neighbors given a
        destination particle index is only used when OpenCL support is
        not available.

        When OpenCL is available, the preferred approach is to
        generate the neighbor loop code and kernel arguments and
        inject this into the CL template files (done by CLCalc)

        Parameters:
        -----------

        source, dest : ParticleArray
            pair for which neighbors are sought.

        scale_fac : REAL
            Radius scale factor for non OpenCL runs.

        cache : bool
            Flag to indicate if neighbors are to be cached.
        """
        # no domain manager is needed for the all-pair search
        self.manager = None
        self.source = source
        self.dest = dest
        self.scale_fac = scale_fac
        self.with_cl = True

        # Explicitly set the cache to false
        self.cache = False

        # Initialize the cache if using with Cython
        self.particle_cache = []

        # set the dirty bit to True
        self.is_dirty = True

    def neighbor_loop_code_start(self):
        """ Return a string for the start of the neighbor loop code """
        return "for (int src_id=0; src_id<nbrs; ++src_id)"

    def neighbor_loop_code_end(self):
        """ Return a string for the start of the neighbor loop code """
        return """ """

    def neighbor_loop_code_break(self):
        # no per-neighbor advance is needed for the all-pair loop
        return ""

    def get_kernel_args(self):
        """ Add the kernel arguments for the OpenCL template """
        src = self.source
        np = numpy.int32(src.get_number_of_particles())

        #CHANGE
        # NOTE(review): `indices` is not defined in this method or at
        # module scope, so calling this raises NameError.  Confirm which
        # buffer was intended here (compare RadixSortNeighborLocator,
        # which passes the sorted particle-index buffer).
        return {'int const nbrs': np,
                '__global uint* indices': indices}

    def update(self):
        """ Update the bin structure and compute cache contents if
        necessary."""
        if self.is_dirty:
            self.is_dirty = False

    def update_status(self):
        """ Update the dirty bit for the locator and the DomainManager"""
        if not self.is_dirty:
            self.is_dirty = self.source.is_dirty or self.dest.is_dirty
##############################################################################
#`RadixSortNeighborLocator` class
##############################################################################
class RadixSortNeighborLocator(OpenCLNeighborLocator):
    """Neighbor locator using the RadixSortManager as domain manager."""

    def __init__(self, manager, source, dest, scale_fac=2.0, cache=False):
        """ Construct a neighbor locator between a pair of arrays.

        Parameters:
        -----------

        manager : DomainManager
            The underlying domain manager used for the indexing scheme for the
            particles.

        source : ParticleArray
            The source particle array from where neighbors are sought.

        dest : ParticleArray
            The destination particle array for whom neighbors are sought.

        scale_fac : float
            Maximum kernel scale factor to determine cell size for binning.

        cache : bool
            Flag to indicate if neighbors are to be cached.
        """
        self.manager = manager
        self.source = source
        self.dest = dest

        self.with_cl = manager.with_cl
        self.scale_fac = scale_fac
        self.cache = cache

        # Initialize the cache if using with Cython
        self.particle_cache = []
        if self.cache:
            self._initialize_cache()

    #######################################################################
    # public interface
    #######################################################################
    def get_nearest_particles(self, i, output_array, exclude_index=-1):
        """ Return nearest particles from source array to the dest point.

        The search radius is the scale factor times the particle's h

        Parameters:
        -----------

        i : int
            The destination index

        output_array : (in/out) LongArray
            Neighbor indices are stored in this array.

        exclude_index : int
            Optional index to exclude from the neighbor list
            NOTIMPLEMENTED!
        """
        if self.cache:
            # BUGFIX: this previously returned self.neighbor_cache[i],
            # but the cache is stored in `particle_cache` and
            # `neighbor_cache` is never assigned, so the cached path
            # raised AttributeError.
            return self.particle_cache[i]
        else:
            self._get_nearest_particles_nocahe(i, output_array)

    ##########################################################################
    # non-public interface
    ##########################################################################
    def _update(self):
        """ Update the bin structure and compute cache contents.

        Caching is only done if explicitly requested and should be
        avoided for large problems to reduce the memory footprint.
        """
        # update the domain manager
        self.manager.update()

        # set the cache if required
        if self.cache:
            self._initialize_cache()
            self._udpdate_cache()

    def _initialize_cache(self):
        """ Initialize the particle neighbor cache contents.

        The particle cache is one LongArray for each destination
        particle.  BUGFIX: this method (and `_udpdate_cache` below) was
        missing from this class although `__init__` and `_update` call
        it; it is supplied here, mirroring
        LinkedListSPHNeighborLocator.
        """
        np = self.dest.get_number_of_particles()
        self.particle_cache = [ LongArray() for i in range(np) ]

    def _udpdate_cache(self):
        """ Compute the contents of the cache """
        np = self.dest.get_number_of_particles()
        for i in range(np):
            nbrs = self.particle_cache[i]
            self._get_nearest_particles_nocahe(i, nbrs)

    def _get_nearest_particles_nocahe(self, i, output_array, exclude_index=-1):
        """ Use the sorted cell counts to get nearest neighbors.

        Parameters:
        -----------

        i : (in) int
            The destination particle index

        output_array : (in/out) LongArray
            Neighbor indices are stored in this array.

        exclude_index : int
            Optional index to exclude from the neighbor list
            NOTIMPLEMENTED!
        """
        manager = self.manager
        src = self.source
        dst = self.dest

        # Enqueue a copy if the binning is done with OpenCL
        manager.enqueue_copy()

        # get the bin structure parameters
        ncx = manager.ncx
        ncy = manager.ncy
        ncells = manager.ncells

        #CHANGE
        # cell_counts and indices for the source
        cellc = manager.cell_counts[ src.name ]
        s_indices = manager.indices[ src.name ]

        # destination indices
        d_indices = manager.indices[ dst.name ]

        # cellid for the destination particle
        cellid = manager.cellids[dst.name][i]

        # get all neighbors from the 27 neighboring cells
        nbrs = util.rs_get_neighbors(cellid, ncx, ncy, ncells, cellc, s_indices)

        xs = src.x.astype(numpy.float32)
        ys = src.y.astype(numpy.float32)
        zs = src.z.astype(numpy.float32)

        # NOTE(review): the destination coordinates are looked up
        # through d_indices[i] here, whereas the linked-list locator
        # uses dst.x[i] directly -- confirm `i` indexes the sorted
        # ordering for this locator.
        xi = numpy.float32( dst.x[d_indices[i]] )
        yi = numpy.float32( dst.y[d_indices[i]] )
        zi = numpy.float32( dst.z[d_indices[i]] )

        radius = numpy.float32( self.scale_fac * dst.h[d_indices[i]] )

        # filter the neighbors to within a cutoff radius
        nbrs = util.filter_neighbors(xi, yi, zi, radius, xs, ys, zs, nbrs)

        output_array.resize( len(nbrs) )
        output_array.set_data( nbrs )

    def neighbor_loop_code_start(self):
        """ Return a string for the start of the neighbor loop code """
        return """// unflatten cellid
            int idx, idy, idz;
            int s_cid, src_id;
            int start_id, end_id;

            int d_cid = cellids[ dest_id ];

            idz = convert_int_rtn( d_cid/(ncx*ncy) );
            d_cid = d_cid - (idz * ncx*ncy);

            idy = convert_int_rtn( d_cid/ncx );
            idx = d_cid - (idy * ncx);

            for (int ix = idx-1; ix <= idx+1; ix++)
            {
                for (int iy = idy-1; iy <= idy+1; iy++)
                {
                    for (int iz = idz-1; iz <= idz+1; iz++)
                    {
                        if ( (ix >=0) && (iy >=0) && (iz >= 0) )
                        {
                            s_cid = (ix) + (iy * ncx) + (iz * ncx*ncy);

                            if ( s_cid < ncells )
                            {
                                start_id = cell_counts[ s_cid ];
                                end_id = cell_counts[ s_cid + 1 ];

                                for (int i=start_id; i<end_id; ++i)
                                {
                                    src_id = src_indices[ i ];
        """

    def neighbor_loop_code_end(self):
        """ Return a string for the start of the neighbor loop code """
        return """
                                } // for (start,end)
                            } // if cid < ncells
                        } // if ix >= 0
                    } // for iz
                } // for iy
            } // for ix
        """

    def neighbor_loop_code_break(self):
        # the inner for loop advances by itself; nothing to emit
        return ""

    def get_kernel_args(self):
        """ Add the kernel arguments for the OpenCL template """
        dst = self.dest
        src = self.source

        #CHANGE
        # copying the buffers created in sort no dm!
        cellids = self.manager.rsort[dst.name].dkeys
        dst_indices = self.manager.rsort[dst.name].dvalues
        src_indices = self.manager.rsort[src.name].dvalues
        cell_counts = self.manager.dcell_counts[src.name]

        return {'int const ncx': self.manager.ncx,
                'int const ncy': self.manager.ncy,
                'int const ncells': self.manager.ncells,
                '__global uint* cellids': cellids,
                '__global uint* cell_counts': cell_counts,
                '__global uint* src_indices': src_indices,
                '__global uint* indices': dst_indices
                }
# import nnps_util as util
# import numpy
# # PySPH imports
# from carray import LongArray
# class OpenCLNeighborLocatorType:
# AllPairNeighborLocator = 0
# LinkedListSPHNeighborLocator = 1
# RadixSortNeighborLocator = 2
# class OpenCLNeighborLocator(object):
# pass
# class LinkedListSPHNeighborLocator(OpenCLNeighborLocator):
# def __init__(self, manager, source, dest, scale_fac=2.0, cache=False):
# """ Create a neighbor locator between a ParticleArray pair.
# A neighbor locator interfaces with a domain manager which
# provides an indexing scheme for the particles. The locator
# knows how to interpret the information generated after the
# domain manager's `update` function has been called.
# For the locators based on linked lists as the domain manager,
# the head and next arrays are used to determine the neighbors.
# Note:
# -----
# Cython functions to retrieve nearest neighbors given a
# destination particle index is only used when OpenCL support is
# not available.
# When OpenCL is available, the preferred approach is to
# generate the neighbor loop code and kernel arguments and
# inject this into the CL template files (done by CLCalc)
# Parameters:
# -----------
# manager : DomainManager
# The domain manager to use for locating neighbors
# source, dest : ParticleArray
# pair for which neighbors are sought.
# scale_fac : REAL
# Radius scale factor for non OpenCL runs.
# cache : bool
# Flag to indicate if neighbors are to be cached.
# """
# self.manager = manager
# self.source = source
# self.dest = dest
# self.scale_fac = scale_fac
# self.with_cl = manager.with_cl
# self.cache = cache
# # Initialize the cache if using with Cython
# self.particle_cache = []
# if self.cache:
# self._initialize_cache()
# #######################################################################
# # public interface
# #######################################################################
# def get_nearest_particles(self, i, output_array, exclude_index=-1):
# """ Return nearest particles from source array to the dest point.
# The search radius is the scale factor times the particle's h
# Parameters:
# -----------
# i : int
# The destination index
# output_array : (in/out) LongArray
# Neighbor indices are stored in this array.
# exclude_index : int
# Optional index to exclude from the neighbor list
# NOTIMPLEMENTED!
# """
# if self.cache:
# return self.neighbor_cache[i]
# else:
# self._get_nearest_particles_nocahe(i, output_array)
# ##########################################################################
# # non-public interface
# ##########################################################################
# def _update(self):
# """ Update the bin structure and compute cache contents.
# Caching is only done if explicitly requested and should be
# avoided for large problems to reduce the memory footprint.
# """
# # update the domain manager
# self.manager.update()
# # set the cache if required
# if self.cache:
# self._initialize_cache()
# self._udpdate_cache()
# def _get_nearest_particles_nocahe(self, i, output_array, exclude_index=-1):
# """ Use the linked list to get nearest neighbors.
# The functions defined in `linked_list_functions.pyx` are used to
# find the nearest neighbors.
# Parameters:
# -----------
# i : (in) int
# The destination particle index
# output_array : (in/out) LongArray
# Neighbor indices are stored in this array.
# exclude_index : int
# Optional index to exclude from the neighbor list
# NOTIMPLEMENTED!
# """
# manager = self.manager
# src = self.source
# dst = self.dest
# # Enqueue a copy if the binning is done with OpenCL
# manager.enqueue_copy()
# # get the bin structure parameters
# ncx = manager.ncx
# ncy = manager.ncy
# ncells = manager.ncells
# # linked list for the source
# head = manager.head[src.name]
# next = manager.Next[src.name]
# # cellid for the destination
# cellid = manager.cellids[dst.name][i]
# ix = manager.ix[dst.name][i]
# iy = manager.iy[dst.name][i]
# iz = manager.iz[dst.name][i]
# # get all neighbors from the 27 neighboring cells
# nbrs = util.ll_get_neighbors(cellid, ix, iy, iz,
# ncx, ncy, ncells, head, next)
# x = src.x.astype(numpy.float32)
# y = src.y.astype(numpy.float32)
# z = src.z.astype(numpy.float32)
# xi = numpy.float32( dst.x[i] )
# yi = numpy.float32( dst.y[i] )
# zi = numpy.float32( dst.z[i] )
# h = dst.h.astype(numpy.float32)
# radius = self.scale_fac * h[i]
# # filter the neighbors to within a cutoff radius
# nbrs = util.filter_neighbors(xi, yi, zi, radius, x, y, z, nbrs)
# output_array.resize( len(nbrs) )
# output_array.set_data( nbrs )
# def _initialize_cache(self):
# """ Iniitialize the particle neighbor cache contents.
# The particle cache is one LongArray for each destination particle.
# """
# np = self.dest.get_number_of_particles()
# self.particle_cache = [ LongArray() for i in range(np) ]
# def _udpdate_cache(self):
# """ Compute the contents of the cache """
# np = self.dest.get_number_of_particles()
# for i in range(np):
# nbrs = self.particle_cache[i]
# self._get_nearest_particles_nocahe(i, nbrs)
# def neighbor_loop_code_start(self):
# """ Return a string for the start of the neighbor loop code """
# return """
# int idx = cix[particle_id];
# int idy = ciy[particle_id];
# int idz = ciz[particle_id];
# REAL tmp = ncx*ncy;
# int src_id, cid;
# for (int ix = idx-1; ix <= idx+1; ++ix )
# {
# for (int iy = idy-1; iy <= idy+1; ++iy)
# {
# for (int iz = idz-1; iz <= idz+1; ++iz)
# {
# if ( (ix >=0) && (iy >=0) && (iz >= 0) )
# {
# cid = (ix) + (iy * ncx) + (iz * tmp);
# if ( cid < ncells )
# {
# src_id = head[ cid ];
# while ( src_id != -1 )
# """
# def neighbor_loop_code_end(self):
# """ Return a string for the start of the neighbor loop code """
# return """
# } // if cid < ncells
# } // if ix >= 0
# } // for iz
# } // for iy
# } // for ix
# """
# def neighbor_loop_code_break(self):
# return "src_id = next[ src_id ]; "
# def get_kernel_args(self):
# """ Add the kernel arguments for the OpenCL template """
# dst = self.dest
# src = self.source
# cellids = self.manager.dcellids[dst.name]
# cix = self.manager.dix[dst.name]
# ciy = self.manager.diy[dst.name]
# ciz = self.manager.diz[dst.name]
# head = self.manager.dhead[src.name]
# next = self.manager.dnext[src.name]
# indices = self.manager.dindices[dst.name]
# return {'int const ncx': self.manager.ncx,
# 'int const ncy': self.manager.ncy,
# 'int const ncells': self.manager.ncells,
# '__global uint* cellids': cellids,
# '__global uint* cix': cix,
# '__global uint* ciy': ciy,
# '__global uint* ciz': ciz,
# '__global int* head': head,
# '__global int* next': next,
# '__global uint* indices': indices
# }
# class AllPairNeighborLocator(OpenCLNeighborLocator):
# def __init__(self, source, dest, scale_fac=2.0, cache=False):
# """ Create a neighbor locator between a ParticleArray pair.
# A neighbor locator interfaces with a domain manager which
# provides an indexing scheme for the particles. The locator
# knows how to interpret the information generated after the
# domain manager's `update` function has been called.
# For the locators based on linked lists as the domain manager,
# the head and next arrays are used to determine the neighbors.
# Note:
# -----
# Cython functions to retrieve nearest neighbors given a
# destination particle index is only used when OpenCL support is
# not available.
# When OpenCL is available, the preferred approach is to
# generate the neighbor loop code and kernel arguments and
# inject this into the CL template files (done by CLCalc)
# Parameters:
# -----------
# source, dest : ParticleArray
# pair for which neighbors are sought.
# scale_fac : REAL
# Radius scale factor for non OpenCL runs.
# cache : bool
# Flag to indicate if neighbors are to be cached.
# """
# self.manager = None
# self.source = source
# self.dest = dest
# self.scale_fac = scale_fac
# self.with_cl = True
# # Explicitly set the cache to false
# self.cache = False
# # Initialize the cache if using with Cython
# self.particle_cache = []
# # set the dirty bit to True
# self.is_dirty = True
# def neighbor_loop_code_start(self):
# """ Return a string for the start of the neighbor loop code """
# return "for (int src_id=0; src_id<nbrs; ++src_id)"
# def neighbor_loop_code_end(self):
# """ Return a string for the start of the neighbor loop code """
# return """ """
# def neighbor_loop_code_break(self):
# return ""
# def get_kernel_args(self):
# """ Add the kernel arguments for the OpenCL template """
# src = self.source
# np = numpy.int32(src.get_number_of_particles())
# return {'int const nbrs': np,
# '__global uint* indices': indices
# }
# def update(self):
# """ Update the bin structure and compute cache contents if
# necessary."""
# if self.is_dirty:
# self.is_dirty = False
# def update_status(self):
# """ Update the dirty bit for the locator and the DomainManager"""
# if not self.is_dirty:
# self.is_dirty = self.source.is_dirty or self.dest.is_dirty
# ##############################################################################
# #`RadixSortNeighborLocator` class
# ##############################################################################
# class RadixSortNeighborLocator(OpenCLNeighborLocator):
# """Neighbor locator using the RadixSortManager as domain manager."""
# def __init__(self, manager, source, dest, scale_fac=2.0, cache=False):
# """ Construct a neighbor locator between a pair of arrays.
# Parameters:
# -----------
# manager : DomainManager
# The underlying domain manager used for the indexing scheme for the
# particles.
# source : ParticleArray
# The source particle array from where neighbors are sought.
# dest : ParticleArray
# The destination particle array for whom neighbors are sought.
# scale_fac : float
# Maximum kernel scale factor to determine cell size for binning.
# cache : bool
# Flag to indicate if neighbors are to be cached.
# """
# self.manager = manager
# self.source = source
# self.dest = dest
# self.with_cl = manager.with_cl
# self.scale_fac = scale_fac
# self.cache = cache
# # Initialize the cache if using with Cython
# self.particle_cache = []
# if self.cache:
# self._initialize_cache()
# #######################################################################
# # public interface
# #######################################################################
# def get_nearest_particles(self, i, output_array, exclude_index=-1):
# """ Return nearest particles from source array to the dest point.
# The search radius is the scale factor times the particle's h
# Parameters:
# -----------
# i : int
# The destination index
# output_array : (in/out) LongArray
# Neighbor indices are stored in this array.
# exclude_index : int
# Optional index to exclude from the neighbor list
# NOTIMPLEMENTED!
# """
# if self.cache:
# return self.neighbor_cache[i]
# else:
# self._get_nearest_particles_nocahe(i, output_array)
# ##########################################################################
# # non-public interface
# ##########################################################################
# def _update(self):
# """ Update the bin structure and compute cache contents.
# Caching is only done if explicitly requested and should be
# avoided for large problems to reduce the memory footprint.
# """
# # update the domain manager
# self.manager.update()
# # set the cache if required
# if self.cache:
# self._initialize_cache()
# self._udpdate_cache()
# def _get_nearest_particles_nocahe(self, i, output_array, exclude_index=-1):
# """ Use the linked list to get nearest neighbors.
# The functions defined in `linked_list_functions.pyx` are used to
# find the nearest neighbors.
# Parameters:
# -----------
# i : (in) int
# The destination particle index
# output_array : (in/out) LongArray
# Neighbor indices are stored in this array.
# exclude_index : int
# Optional index to exclude from the neighbor list
# NOTIMPLEMENTED!
# """
# manager = self.manager
# src = self.source
# dst = self.dest
# # Enqueue a copy if the binning is done with OpenCL
# manager.enqueue_copy()
# # get the bin structure parameters
# ncx = manager.ncx
# ncy = manager.ncy
# ncells = manager.ncells
# # cell_counts and indices for the source
# cellc = manager.cell_counts[ src.name ]
# s_indices = manager.indices[ src.name ]
# # destination indices
# d_indices = manager.indices[ dst.name ]
# # cellid for the destination particle
# cellid = manager.cellids[dst.name][i]
# # get all neighbors from the 27 neighboring cells
# nbrs = util.rs_get_neighbors(cellid, ncx, ncy, ncells, cellc, s_indices)
# xs = src.x.astype(numpy.float32)
# ys = src.y.astype(numpy.float32)
# zs = src.z.astype(numpy.float32)
# xi = numpy.float32( dst.x[d_indices[i]] )
# yi = numpy.float32( dst.y[d_indices[i]] )
# zi = numpy.float32( dst.z[d_indices[i]] )
# radius = numpy.float32( self.scale_fac * dst.h[d_indices[i]] )
# # filter the neighbors to within a cutoff radius
# nbrs = util.filter_neighbors(xi, yi, zi, radius, xs, ys, zs, nbrs)
# output_array.resize( len(nbrs) )
# output_array.set_data( nbrs )
# def neighbor_loop_code_start(self):
# return """// unflatten cellid
# int idx, idy, idz;
# int s_cid, src_id;
# int start_id, end_id;
# int d_cid = cellids[ dest_id ];
# idz = convert_int_rtn( d_cid/(ncx*ncy) );
# d_cid = d_cid - (idz * ncx*ncy);
# idy = convert_int_rtn( d_cid/ncx );
# idx = d_cid - (idy * ncx);
# for (int ix = idx-1; ix <= idx+1; ix++)
# {
# for (int iy = idy-1; iy <= idy+1; iy++)
# {
# for (int iz = idz-1; iz <= idz+1; iz++)
# {
# if ( (ix >=0) && (iy >=0) && (iz >= 0) )
# {
# s_cid = (ix) + (iy * ncx) + (iz * ncx*ncy);
# if ( s_cid < ncells )
# {
# start_id = cell_counts[ s_cid ];
# end_id = cell_counts[ s_cid + 1 ];
# for (int i=start_id; i<end_id; ++i)
# {
# src_id = src_indices[ i ];
# """
# def neighbor_loop_code_end(self):
# """ Return a string for the start of the neighbor loop code """
# return """
# } // for (start,end)
# } // if cid < ncells
# } // if ix >= 0
# } // for iz
# } // for iy
# } // for ix
# """
# def neighbor_loop_code_break(self):
# return ""
# def get_kernel_args(self):
# """ Add the kernel arguments for the OpenCL template """
# dst = self.dest
# src = self.source
# # copying the buffers created in sort no dm!
# cellids = self.manager.rsort[dst.name].dkeys
# dst_indices = self.manager.rsort[dst.name].dvalues
# src_indices = self.manager.rsort[src.name].dvalues
# cell_counts = self.manager.dcell_counts[src.name]
# return {'int const ncx': self.manager.ncx,
# 'int const ncy': self.manager.ncy,
# 'int const ncells': self.manager.ncells,
# '__global uint* cellids': cellids,
# '__global uint* cell_counts': cell_counts,
# '__global uint* src_indices': src_indices,
# '__global uint* indices': dst_indices
# }
| Python |
# standard imports
import numpy
# local imports
from pysph.base.particle_array import *
def generate_sample_dataset_1():
    """Build the two sample ParticleArrays for the first cell test case.

    The particle layout is pictured in test_cell_case1.png.
    """
    xa = numpy.array([0.25, 0.8, 0.5, 0.8, 0.2, 0.5, 1.5, 1.5])
    ya = numpy.array([0.25, 0.1, 0.5, 0.8, 0.9, 1.5, 0.5, 1.5])
    za = numpy.zeros(8)
    ha = numpy.ones(8)
    props1 = {'x': {'data': xa}, 'y': {'data': ya},
              'z': {'data': za}, 'h': {'data': ha}}
    parr1 = ParticleArray(name='parr1', **props1)

    xb = numpy.array([0.2, 1.2, 1.5, 0.4])
    yb = numpy.zeros(4)
    zb = numpy.array([1.6, 1.5, -0.5, 0.4])
    hb = numpy.ones(4)
    props2 = {'x': {'data': xb}, 'y': {'data': yb},
              'z': {'data': zb}, 'h': {'data': hb}}
    parr2 = ParticleArray(name='parr2', **props2)

    return [parr1, parr2]
def generate_sample_dataset_2():
    """Build the single sample ParticleArray for the second cell test case.

    The particle layout is pictured in test_cell_data2.png.
    """
    xa = numpy.array([-0.5, -0.5, 0.5, 0.5, 1.5, 2.5, 2.5])
    ya = numpy.array([2.5, -0.5, 1.5, 0.5, 0.5, 0.5, -0.5])
    za = numpy.zeros(7)
    ha = numpy.ones(7)
    props = {'x': {'data': xa}, 'y': {'data': ya},
             'z': {'data': za}, 'h': {'data': ha}}
    return [ParticleArray(name='parr1', **props)]
| Python |
"""
Tests for the particle array module.
"""
# standard imports
import unittest
import numpy
# local imports
import pysph
from pysph.base import particle_array
from pysph.base.carray import LongArray, IntArray, DoubleArray
from pysph.base import carray
import pickle
def check_array(x, y):
    """Return True when *x* and *y* agree elementwise to within an
    absolute tolerance of 1e-16 (no relative tolerance)."""
    tolerance = 1e-16
    return numpy.allclose(x, y, rtol=0, atol=tolerance)
###############################################################################
# `ParticleArrayTest` class.
###############################################################################
class ParticleArrayTest(unittest.TestCase):
"""
Tests for the particle array class.
"""
def test_constructor(self):
    """
    Test the constructor.
    """
    # Default constructor test.
    p = particle_array.ParticleArray(name='test_particle_array')
    self.assertEqual(p.name, 'test_particle_array')
    self.assertEqual(p.temporary_arrays == {}, True)
    self.assertEqual(p.is_dirty, True)
    # a 'tag' property is always created, even with no particles
    self.assertEqual(p.properties.has_key('tag'), True)
    self.assertEqual(p.properties['tag'].length, 0)
    # Constructor with some properties.
    x = [1, 2, 3, 4.]
    y = [0., 1., 2., 3.]
    z = [0., 0., 0., 0.]
    m = [1., 1., 1., 1.]
    h = [.1, .1, .1, .1]
    p = particle_array.ParticleArray(x={'data':x},
                                     y={'data':y},
                                     z={'data':z},
                                     m={'data':m},
                                     h={'data':h})
    self.assertEqual(p.name, '')
    self.assertEqual(p.properties.has_key('x'), True)
    self.assertEqual(p.properties.has_key('y'), True)
    self.assertEqual(p.properties.has_key('z'), True)
    self.assertEqual(p.properties.has_key('m'), True)
    self.assertEqual(p.properties.has_key('h'), True)
    # get the properties are check if they are the same
    xarr = p.properties['x'].get_npy_array()
    self.assertEqual(check_array(xarr, x), True)
    yarr = p.properties['y'].get_npy_array()
    self.assertEqual(check_array(yarr, y), True)
    zarr = p.properties['z'].get_npy_array()
    self.assertEqual(check_array(zarr, z), True)
    marr = p.properties['m'].get_npy_array()
    self.assertEqual(check_array(marr, m), True)
    harr = p.properties['h'].get_npy_array()
    self.assertEqual(check_array(harr, h), True)
    # check if the 'tag' array was added.
    self.assertEqual(p.properties.has_key('tag'), True)
    self.assertEqual(p.properties.values()[0].length == len(x), True)
    # Constructor with tags
    tags = [0, 1, 0, 1]
    p = particle_array.ParticleArray(x={'data':x}, y={'data':y},
                                     z={'data':z},
                                     tag={'data':tags,'type':'long'})
    # particles appear realigned: real (tag 0) particles come first
    self.assertEqual(check_array(p.get('tag', only_real_particles=False),
                                 [0,0,1,1]), True)
    self.assertEqual(check_array(p.get('x', only_real_particles=False),
                                 [1,3,2,4]), True)
    self.assertEqual(check_array(p.get('y', only_real_particles=False),
                                 [0,2,1,3]), True)
    self.assertEqual(check_array(p.get('z', only_real_particles=False),
                                 [0,0,0,0]), True)
    # trying to create particle array without any values but some
    # properties.
    p = particle_array.ParticleArray(x={}, y={}, z={}, h={})
    self.assertEqual(p.get_number_of_particles(), 0)
    self.assertEqual(p.properties.has_key('x'), True)
    self.assertEqual(p.properties.has_key('y'), True)
    self.assertEqual(p.properties.has_key('z'), True)
    self.assertEqual(p.properties.has_key('tag'), True)
    # now trying to supply some properties with values and others without
    p = particle_array.ParticleArray(x={'default':10.0}, y={'data':[1.0, 2.0]},
                                     z={}, h={'data':[0.1, 0.1]})
    self.assertEqual(p.get_number_of_particles(), 2)
    self.assertEqual(check_array(p.x, [10., 10.]), True)
    self.assertEqual(check_array(p.y, [1., 2.]), True)
    self.assertEqual(check_array(p.z, [0, 0]), True)
    self.assertEqual(check_array(p.h, [0.1, 0.1]), True)
def test_get_number_of_particles(self):
    """
    Tests the get_number_of_particles of particles.
    """
    x = [1, 2, 3, 4.]
    y = [0., 1., 2., 3.]
    z = [0., 0., 0., 0.]
    m = [1., 1., 1., 1.]
    h = [.1, .1, .1, .1]
    p = particle_array.ParticleArray(x={'data':x}, y={'data':y},
                                     z={'data':z}, m={'data':m}, h={'data':h})
    # the count follows the length of the property arrays
    self.assertEqual(p.get_number_of_particles(), 4)
def test_get(self):
    """
    Tests the get function.
    """
    x = [1, 2, 3, 4.]
    y = [0., 1., 2., 3.]
    z = [0., 0., 0., 0.]
    m = [1., 1., 1., 1.]
    h = [.1, .1, .1, .1]
    p = particle_array.ParticleArray(x={'data':x}, y={'data':y},
                                     z={'data':z}, m={'data':m}, h={'data':h})
    # each property should come back unchanged
    self.assertEqual(check_array(x, p.get('x')), True)
    self.assertEqual(check_array(y, p.get('y')), True)
    self.assertEqual(check_array(z, p.get('z')), True)
    self.assertEqual(check_array(m, p.get('m')), True)
    self.assertEqual(check_array(h, p.get('h')), True)
def test_set(self):
    """
    Tests the set function.
    """
    x = [1, 2, 3, 4.]
    y = [0., 1., 2., 3.]
    z = [0., 0., 0., 0.]
    m = [1., 1., 1., 1.]
    h = [.1, .1, .1, .1]
    p = particle_array.ParticleArray(x={'data':x}, y={'data':y},
                                     z={'data':z}, m={'data':m},
                                     h={'data':h})
    # set the x array with new values
    p.set(**{'x':[4., 3, 2, 1], 'h':[0.2, 0.2, 0.2, 0.2]})
    self.assertEqual(check_array(p.get('x'), [4., 3, 2, 1]), True)
    self.assertEqual(check_array(p.get('h'), [0.2, 0.2, 0.2, 0.2]), True)
    # trying to set the tags
    p.set(**{'tag':[0, 1, 1, 1]})
    # only the tag-0 particles are returned by a plain get()
    self.assertEqual(check_array(p.get('tag', only_real_particles=False)
                                 , [0, 1, 1, 1]), True)
    self.assertEqual(check_array(p.get('tag'), [0]), True)
    # try setting array with smaller length array.
    p.set(**{'x':[5, 6, 7]})
    self.assertEqual(check_array(p.get('x', only_real_particles=False),
                                 [5, 6, 7, 1]), True)
    # try setting array with longer array.
    self.assertRaises(ValueError, p.set, **{'x':[1., 2, 3, 5, 6]})
def test_add_temporary_array(self):
    """
    Tests the add_temporary_array function.
    """
    x = [1, 2, 3, 4.]
    y = [0., 1., 2., 3.]
    z = [0., 0., 0., 0.]
    m = [1., 1., 1., 1.]
    h = [.1, .1, .1, .1]
    p = particle_array.ParticleArray(x={'data':x}, y={'data':y}, z={'data':z}, m={'data':m}, h={'data':h})
    # make sure the temporary_arrays dict is empty.
    self.assertEqual(p.temporary_arrays, {})
    # now add some temporary arrays.
    p.add_temporary_array('temp1')
    p.add_temporary_array('temp2')
    # get the arrays and make sure they are of correct size.
    # temporaries are sized to the current particle count
    self.assertEqual(p.get('temp1').size == 4, True)
    self.assertEqual(p.get('temp2').size == 4, True)
    # try to add temporary array with name as some property.
    self.assertRaises(ValueError, p.add_temporary_array, 'x')
    # try setting a temporary array.
    p.set(**{'temp1':[2, 4, 3, 1]})
    self.assertEqual(check_array(p.get('temp1'), [2, 4, 3, 1]), True)
def test_clear(self):
    """
    Tests the clear function.
    """
    x = [1, 2, 3, 4.]
    y = [0., 1., 2., 3.]
    z = [0., 0., 0., 0.]
    m = [1., 1., 1., 1.]
    h = [.1, .1, .1, .1]
    p = particle_array.ParticleArray(x={'data':x}, y={'data':y},
                                     z={'data':z}, m={'data':m},
                                     h={'data':h})
    p.clear()
    # after clear() only the four standard bookkeeping properties
    # remain, all empty: tag, group, local and pid
    self.assertEqual(len(p.properties), 4)
    self.assertEqual(p.properties.has_key('tag'), True)
    self.assertEqual(p.properties['tag'].length, 0)
    self.assertEqual(p.properties.has_key('group'), True)
    self.assertEqual(p.properties['group'].length, 0)
    self.assertEqual(p.properties.has_key('local'), True)
    self.assertEqual(p.properties['local'].length, 0)
    self.assertEqual(p.properties.has_key('pid'), True)
    self.assertEqual(p.properties['pid'].length, 0)
    self.assertEqual(p.is_dirty, True)
    self.assertEqual(p.temporary_arrays, {})
def test_getattr(self):
    """
    Tests the __getattr__ function.
    """
    x = [1, 2, 3, 4.]
    y = [0., 1., 2., 3.]
    z = [0., 0., 0., 0.]
    m = [1., 1., 1., 1.]
    h = [.1, .1, .1, .1]
    p = particle_array.ParticleArray(x={'data':x}, y={'data':y},
                                     z={'data':z}, m={'data':m},
                                     h={'data':h})
    # properties are exposed as attributes
    self.assertEqual(check_array(x, p.x), True)
    self.assertEqual(check_array(y, p.y), True)
    self.assertEqual(check_array(z, p.z), True)
    self.assertEqual(check_array(m, p.m), True)
    self.assertEqual(check_array(h, p.h), True)
    # try getting an non-existant attribute
    self.assertRaises(AttributeError, p.__getattr__, 'a')
def test_setattr(self):
    """
    Tests the __setattr__ function.
    """
    x = [1, 2, 3, 4.]
    y = [0., 1., 2., 3.]
    z = [0., 0., 0., 0.]
    m = [1., 1., 1., 1.]
    h = [.1, .1, .1, .1]
    p = particle_array.ParticleArray(x={'data':x}, y={'data':y},
                                     z={'data':z}, m={'data':m},
                                     h={'data':h})
    # assigning to a property attribute updates the stored array
    p.x = p.x*2.0
    self.assertEqual(check_array(p.get('x'), [2., 4, 6, 8]), True)
    p.x = p.x + 3.0*p.x
    self.assertEqual(check_array(p.get('x'), [8., 16., 24., 32.]), True)
def test_remove_particles(self):
    """
    Tests the remove_particles function.
    """
    x = [1, 2, 3, 4.]
    y = [0., 1., 2., 3.]
    z = [0., 0., 0., 0.]
    m = [1., 1., 1., 1.]
    h = [.1, .1, .1, .1]
    p = particle_array.ParticleArray(x={'data':x}, y={'data':y},
                                     z={'data':z}, m={'data':m},
                                     h={'data':h})
    p.add_temporary_array('tmp1')
    # remove the first two particles (indices 0 and 1)
    remove_arr = LongArray(0)
    remove_arr.append(0)
    remove_arr.append(1)
    p.remove_particles(remove_arr)
    self.assertEqual(p.get_number_of_particles(), 2)
    self.assertEqual(check_array(p.x, [3., 4.]), True)
    self.assertEqual(check_array(p.y, [2., 3.]), True)
    self.assertEqual(check_array(p.z, [0., 0.]), True)
    self.assertEqual(check_array(p.m, [1., 1.]), True)
    self.assertEqual(check_array(p.h, [.1, .1]), True)
    # temporary arrays shrink along with the properties
    self.assertEqual(len(p.tmp1), 2)
    # now try invalid operatios to make sure errors are raised.
    remove_arr.resize(10)
    self.assertRaises(ValueError, p.remove_particles, remove_arr)
    # now try to remove a particle with index more that particle
    # length.
    remove_arr.resize(1)
    remove_arr[0] = 2
    p.remove_particles(remove_arr)
    # make sure no change occurred.
    self.assertEqual(p.get_number_of_particles(), 2)
    self.assertEqual(check_array(p.x, [3., 4.]), True)
    self.assertEqual(check_array(p.y, [2., 3.]), True)
    self.assertEqual(check_array(p.z, [0., 0.]), True)
    self.assertEqual(check_array(p.m, [1., 1.]), True)
    self.assertEqual(check_array(p.h, [.1, .1]), True)
    self.assertEqual(len(p.tmp1), 2)
def test_add_particles(self):
    """
    Tests the add_particles function.
    """
    x = [1, 2, 3, 4.]
    y = [0., 1., 2., 3.]
    z = [0., 0., 0., 0.]
    m = [1., 1., 1., 1.]
    h = [.1, .1, .1, .1]
    p = particle_array.ParticleArray(x={'data':x}, y={'data':y},
                                     z={'data':z}, m={'data':m},
                                     h={'data':h})
    p.add_temporary_array('tmp1')
    p.set_dirty(False)
    new_particles = {}
    new_particles['x'] = numpy.array([5., 6, 7])
    new_particles['y'] = numpy.array([4., 5, 6])
    new_particles['z'] = numpy.array([0., 0, 0])
    p.add_particles(**new_particles)
    self.assertEqual(p.get_number_of_particles(), 7)
    self.assertEqual(check_array(p.x, [1., 2, 3, 4, 5, 6, 7]), True)
    self.assertEqual(check_array(p.y, [0., 1, 2, 3, 4, 5, 6]), True)
    self.assertEqual(check_array(p.z, [0., 0, 0, 0, 0, 0, 0]), True)
    # adding particles marks the array dirty
    self.assertEqual(p.is_dirty, True)
    # make sure the other arrays were resized
    self.assertEqual(len(p.h), 7)
    self.assertEqual(len(p.m), 7)
    self.assertEqual(len(p.tmp1), 7)
    p.set_dirty(False)
    # try adding an empty particle list
    p.add_particles(**{})
    self.assertEqual(p.get_number_of_particles(), 7)
    self.assertEqual(check_array(p.x, [1., 2, 3, 4, 5, 6, 7]), True)
    self.assertEqual(check_array(p.y, [0., 1, 2, 3, 4, 5, 6]), True)
    self.assertEqual(check_array(p.z, [0., 0, 0, 0, 0, 0, 0]), True)
    # a no-op add must not mark the array dirty
    self.assertEqual(p.is_dirty, False)
    # make sure the other arrays were resized
    self.assertEqual(len(p.h), 7)
    self.assertEqual(len(p.m), 7)
    self.assertEqual(len(p.tmp1), 7)
    # adding particles with tags
    p = particle_array.ParticleArray(x={'data':x}, y={'data':y},
                                     z={'data':z}, m={'data':m},
                                     h={'data':h})
    p.add_particles(x=[5, 6, 7, 8], tag=[1, 1, 0, 0])
    self.assertEqual(p.get_number_of_particles(), 8)
    # only the tag-0 (real) particles are visible through plain access
    self.assertEqual(check_array(p.x, [1, 2, 3, 4, 7, 8]), True)
    self.assertEqual(check_array(p.y, [0, 1, 2, 3, 0, 0]), True)
    self.assertEqual(check_array(p.z, [0, 0, 0, 0, 0, 0]), True)
def test_remove_tagged_particles(self):
    """
    Tests the remove_tagged_particles function.
    """
    x = [1, 2, 3, 4.]
    y = [0., 1., 2., 3.]
    z = [0., 0., 0., 0.]
    m = [1., 1., 1., 1.]
    h = [.1, .1, .1, .1]
    tag = [1, 1, 1, 0]
    p = particle_array.ParticleArray(x={'data':x}, y={'data':y},
                                     z={'data':z}, m={'data':m},
                                     h={'data':h}, tag={'data':tag})
    p.add_temporary_array('tmp1')
    print p.x, p.tag
    # removing tag 0 leaves only the three tag-1 particles
    p.remove_tagged_particles(0)
    self.assertEqual(p.get_number_of_particles(), 3)
    self.assertEqual(check_array(p.get('x', only_real_particles=False)
                                 , [1, 2, 3.]), True)
    print p.get('x', only_real_particles=False)
    self.assertEqual(check_array(p.get('y', only_real_particles=False)
                                 , [0., 1, 2]), True)
    self.assertEqual(check_array(p.get('z', only_real_particles=False)
                                 , [0., 0, 0]), True)
    self.assertEqual(check_array(p.get('h', only_real_particles=False)
                                 , [.1, .1, .1]), True)
    self.assertEqual(check_array(p.get('m', only_real_particles=False)
                                 , [1., 1., 1.]), True)
    self.assertEqual(len(p.get('tmp1', only_real_particles=False)), 3)
    # no real (tag 0) particles remain, so plain access is empty
    self.assertEqual(check_array(p.x, []), True)
    self.assertEqual(check_array(p.y, []), True)
    self.assertEqual(check_array(p.z, []), True)
    self.assertEqual(check_array(p.h, []), True)
    self.assertEqual(check_array(p.m, []), True)
    self.assertEqual(check_array(p.tmp1, []), True)
def test_add_property(self):
    """
    Tests the add_property function.
    """
    x = [1, 2, 3, 4.]
    y = [0., 1., 2., 3.]
    z = [0., 0., 0., 0.]
    m = [1., 1., 1., 1.]
    h = [.1, .1, .1, .1]
    tag = [0, 0, 0, 0]
    p = particle_array.ParticleArray(x={'data':x}, y={'data':y},
                                     z={'data':z}, m={'data':m},
                                     h={'data':h}, tag={'data':tag})
    # re-adding an existing property must be a no-op
    p.add_property({'name':'x'})
    # make sure the current 'x' property is intact.
    self.assertEqual(check_array(p.x, x), True)
    # add a property with complete specification
    p.add_property({'name':'f1',
                    'data':[1, 1, 2, 3],
                    'type':'int',
                    'default':4})
    self.assertEqual(check_array(p.f1, [1, 1, 2, 3]), True)
    self.assertEqual(type(p.properties['f1']), IntArray)
    self.assertEqual(p.default_values['f1'], 4)
    # add a property without specifying the type
    p.add_property({'name':'f2',
                    'data':[1, 1, 2, 3],
                    'default':4.0})
    # double is the fallback type when none is given
    self.assertEqual(type(p.properties['f2']), DoubleArray)
    self.assertEqual(check_array(p.f2, [1, 1, 2, 3]), True)
    p.add_property({'name':'f3'})
    self.assertEqual(type(p.properties['f3']), DoubleArray)
    self.assertEqual(p.properties['f3'].length, p.get_number_of_particles())
    self.assertEqual(check_array(p.f3, [0, 0, 0, 0]), True)
    p.add_property({'name':'f4', 'default':3.0})
    self.assertEqual(type(p.properties['f4']), DoubleArray)
    self.assertEqual(p.properties['f4'].length, p.get_number_of_particles())
    self.assertEqual(check_array(p.f4, [3, 3, 3, 3]), True)
def test_extend(self):
    """
    Tests the extend function.
    """
    p = particle_array.ParticleArray(default_particle_tag=10, x={},
                                     y={'default':-1.})
    # new particles get each property's default value and the
    # array-wide default tag
    p.extend(5)
    self.assertEqual(p.get_number_of_particles(), 5)
    self.assertEqual(check_array(p.get(
        'x', only_real_particles=False), [0, 0, 0, 0, 0]), True)
    self.assertEqual(check_array(p.get('y', only_real_particles=False),
                                 [-1., -1., -1., -1., -1.]), True)
    self.assertEqual(check_array(p.get('tag', only_real_particles=False),
                                 [10, 10, 10, 10, 10]), True)
def test_align_particles(self):
    """
    Tests the align particles function.
    """
    p = particle_array.ParticleArray()
    p.add_property({'name':'x', 'data':[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]})
    p.add_property({'name':'y', 'data':[10, 9, 8, 7, 6, 5, 4, 3, 2, 1]})
    p.set_dirty(False)
    # setting tags realigns so that all tag-0 particles come first
    p.set(**{'tag':[0, 0, 1, 1, 1, 0, 4, 0, 1, 5]})
    self.assertEqual(check_array(p.get('x', only_real_particles=False),
                                 [1, 2, 6, 8, 5, 3, 7, 4, 9, 10]),
                     True)
    self.assertEqual(check_array(p.get('y', only_real_particles=False),
                                 [10, 9, 5, 3, 6, 8, 4, 7, 2, 1]), True)
    self.assertEqual(p.is_dirty, True)
    p.set_dirty(False)
    # tags that are already aligned must not reorder or mark dirty
    p.set(**{'tag':[0, 0, 0, 0, 1, 1, 1, 1, 1, 1]})
    self.assertEqual(check_array(p.get('x', only_real_particles=False),
                                 [1, 2, 6, 8, 5, 3, 7, 4, 9, 10]),
                     True)
    self.assertEqual(check_array(p.get('y', only_real_particles=False),
                                 [10, 9, 5, 3, 6, 8, 4, 7, 2, 1]), True)
    self.assertEqual(p.is_dirty, False)
def test_append_parray(self):
    """
    Tests the append_parray function.
    """
    p1 = particle_array.ParticleArray()
    p1.add_property({'name':'x', 'data':[1, 2, 3]})
    p1.align_particles()
    p2 = particle_array.ParticleArray(x={'data':[4, 5, 6]},
                                      y={'data':[1, 2, 3 ]},
                                      tag={'data':[1, 0, 1]})
    p1.append_parray(p2)
    print p1.get('x', only_real_particles=False)
    print p1.get('y', only_real_particles=False)
    print p1.get('tag', only_real_particles=False)
    self.assertEqual(p1.get_number_of_particles(), 6)
    # only the real (tag 0) particles of p2 show up in plain access;
    # p2's extra 'y' property is created in p1 with zero defaults
    self.assertEqual(check_array(p1.x, [1, 2, 3, 5]), True)
    self.assertEqual(check_array(p1.y, [0, 0, 0, 2]), True)
    self.assertEqual(check_array(p1.tag, [0, 0, 0, 0]), True)
def test_copy_properties(self):
"""
Tests the copy properties function.
"""
p1 = particle_array.ParticleArray()
p1.add_property({'name':'x', 'data':[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]})
p1.add_property({'name':'y'})
p1.add_property({'name':'t'})
p1.align_particles()
p2 = particle_array.ParticleArray()
p2.add_property({'name':'t', 'data':[-1, -1, -1, -1]})
p2.add_property({'name':'s', 'data':[2, 3, 4, 5]})
p2.align_particles()
p1.copy_properties(p2, start_index=5, end_index=9)
self.assertEqual(check_array(p1.t, [0, 0, 0, 0, 0, -1, -1, -1, -1, 0]),
True)
p1.add_property({'name':'s'})
p1.copy_properties(p2, start_index=5, end_index=9)
self.assertEqual(check_array(p1.t, [0, 0, 0, 0, 0, -1, -1, -1, -1, 0]),
True)
self.assertEqual(check_array(p1.s, [0, 0, 0, 0, 0, 2, 3, 4, 5, 0]), True)
def test_pickle(self):
"""
Tests the pickle and unpicle functions
"""
p1 = particle_array.ParticleArray()
x = range(10)
p1.add_property({'name':'x', 'data':[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]})
p1.add_property({'name':'y'})
p1.add_property({'name':'t'})
p1.align_particles()
s = pickle.dumps(p1)
p2 = pickle.loads(s)
self.assertEqual(len(p1.x), len(p2.x))
check_array(p1.x, p2.x)
if __name__ == '__main__':
    import logging
    # route log records to stderr so diagnostics from the tested code
    # are visible while the suite runs.
    logger = logging.getLogger()
    ch = logging.StreamHandler()
    logger.addHandler(ch)
    unittest.main()
| Python |
class ParticleType:
    """
    Enumeration (as plain class attributes) of the particle types used
    in PySPH.  The class is never instantiated; access the attributes
    directly, e.g. ``ParticleType.Fluid``.

    Fluid      -- default type, used to represent fluids.
    Solid      -- represents solids.
    DummyFluid --
    Probe      --
    Boundary   -- boundary particles that contribute to forces but
                  inherit properties from other particles; avoids
                  particle deficiency near boundaries.
    """
    Fluid = 0
    Solid = 1
    DummyFluid = 2
    Probe = 3
    Boundary = 4

    def __init__(self):
        """
        Disallow instantiation.

        Only the class attributes are meant to be used; attempting to
        create an instance raises a SystemError.
        """
        raise SystemError('Do not instantiate the ParticleType class')
| Python |
"""
Extension to the dictionary class to access keys as attributes.
"""
class AttrDict(dict):
    """
    Extension to the dictionary class to access keys as attributes.

    Reading an attribute returns the value stored under the key of the
    same name; a missing key raises AttributeError.  Assigning to an
    attribute only works for keys that already exist -- add a new key
    through the normal dictionary interface (the [] operator) first.
    """
    def __setattr__(self, key, value):
        """Set an existing key; raise AttributeError for unknown keys."""
        # 'in' replaces the deprecated dict.has_key(); identical here
        # since __contains__ is not overridden.
        if key in self:
            self[key] = value
        else:
            raise AttributeError(key)

    def __getattr__(self, key):
        """Return self[key], mapping a missing key to AttributeError."""
        try:
            return self[key]
        except KeyError:
            raise AttributeError(key)
| Python |
from cell import CellManager
from nnps import NNPSManager, NeighborLocatorType
from particle_array import ParticleArray
from particle_types import ParticleType
from domain_manager import DomainManagerType as CLDomain
from locator import OpenCLNeighborLocatorType as CLLocator
import locator
import domain_manager
from pysph.solver.cl_utils import HAS_CL
if HAS_CL:
import pyopencl as cl
# Convenience aliases for the particle type constants.
Fluid = ParticleType.Fluid
Solid = ParticleType.Solid
Probe = ParticleType.Probe
DummyFluid = ParticleType.DummyFluid
Boundary = ParticleType.Boundary

# Default neighbor locator type used throughout this module.
SPHNeighborLocator = NeighborLocatorType.SPHNeighborLocator

# MPI conditional imports
HAS_MPI = True
try:
    from mpi4py import MPI
except ImportError:
    HAS_MPI = False
else:
    from pysph.parallel.parallel_cell import ParallelCellManager

import numpy
class Particles(object):
""" A collection of particles and related data structures that
hat define an SPH simulation.
In pysph, particle properties are stored in a ParticleArray. The
array may represent a particular type of particle (solid, fluid
etc). Valid types are defined in base.particle_types.
Indexing of the particles is performed by a CellManager and
nearest neighbors are obtained via an instance of NNPSManager.
Particles is a collection of these data structures to provide a
single point access to
(a) Hold all particle information
(b) Update the indexing scheme when particles have moved.
(d) Update remote particle properties in parallel runs.
(e) Barrier synchronizations across processors
Data Attributes:
----------------
arrays -- a list of particle arrays in the simulation.
cell_manager -- the CellManager for spatial indexing.
nnps_manager -- the NNPSManager for neighbor queries.
correction_manager -- a kernel KernelCorrectionManager if kernel
correction is used. Defaults to None
misc_prop_update_functions -- A list of functions to evaluate
properties at the beginning of a sub step.
variable_h -- boolean indicating if variable smoothing lengths are
considered. Defaults to False
in_parallel -- boolean indicating if running in parallel. Defaults to False
load_balancing -- boolean indicating if load balancing is required.
Defaults to False.
pid -- processor id if running in parallel
Example:
---------
In [1]: import pysph.base.api as base
In [2]: x = linspace(-pi,pi,101)
In [3]: pa = base.get_particle_array(x=x)
In [4]: particles = base.Particles(arrays=[pa], in_parallel=True,
load_balancing=False, variable_h=True)
Notes:
------
An appropriate cell manager (CellManager/ParallelCellManager) is
created with reference to the 'in_parallel' attribute.
Similarly an appropriate NNPSManager is created with reference to
the 'variable_h' attribute.
"""
def __init__(self, arrays=[], in_parallel=False, variable_h=False,
load_balancing=True,
locator_type = SPHNeighborLocator,
periodic_domain=None,
min_cell_size=-1,
max_cell_size=0,
max_radius_scale=2,
update_particles=True):
""" Constructor
Parameters:
-----------
arrays -- list of particle arrays in the simulation
in_parallel -- flag for parallel runs
variable_h -- flag for variable smoothing lengths
load_balancing -- flag for dynamic load balancing.
periodic_domain -- the periodic domain for periodicity
"""
# set the flags
self.variable_h = variable_h
self.in_parallel = in_parallel
self.load_balancing = load_balancing
self.locator_type = locator_type
self.min_cell_size = min_cell_size
self.max_cell_size = max_cell_size
self.periodic_domain = periodic_domain
self.parallel_manager = None
self.max_radius_scale = max_radius_scale
# Some sanity checks on the input arrays.
assert len(arrays) > 0, "Particles must be given some arrays!"
prec = arrays[0].cl_precision
msg = "All arrays must have the same cl_precision"
for arr in arrays[1:]:
assert arr.cl_precision == prec, msg
self.arrays = arrays
self.kernel = None
# set defaults
self.correction_manager = None
self.misc_prop_update_functions = []
# initialize the cell manager and nnps manager
self.initialize()
def initialize(self):
""" Perform all initialization tasks here """
# create the cell manager
#if not self.in_parallel:
self.cell_manager = CellManager(arrays_to_bin=self.arrays,
min_cell_size=self.min_cell_size,
max_cell_size=self.max_cell_size,
max_radius_scale=self.max_radius_scale,
periodic_domain=self.periodic_domain)
#else:
# self.cell_manager = ParallelCellManager(
# arrays_to_bin=self.arrays, load_balancing=self.load_balancing)
#self.pid = self.cell_manager.pid
# create the nnps manager
self.nnps_manager = NNPSManager(cell_manager=self.cell_manager,
variable_h=self.variable_h,
locator_type=self.locator_type)
# call an update on the particles (i.e index)
self.update()
def update(self, cache_neighbors=False):
""" Update the status of the Particles.
Parameters:
-----------
cache_neighbors -- flag for caching kernel interactions
Notes:
-------
This function must be called whenever particles have moved and
the indexing structure invalid. After a call to this function,
particle neighbors will be accurately returned.
Since particles move at the end of an integration
step/sub-step, we may perform any other operation that would
be required for the subsequent step/sub-step. Examples of
these are summation density, equation of state, smoothing
length updates, evaluation of velocity divergence/vorticity
etc.
All other properties may be updated by appending functions to
the list 'misc_prop_update_functions'. These functions must
implement an 'eval' method which takes no arguments. An example
is the UpdateDivergence function in 'sph.update_misc_props.py'
"""
pm = self.parallel_manager
if pm is not None:
pm.update()
err = self.nnps_manager.py_update()
assert err != -1, 'NNPSManager update failed! '
# update any other properties (rho, p, cs, div etc.)
self.evaluate_misc_properties()
# evaluate kernel correction terms
if self.correction_manager:
self.correction_manager.update()
def evaluate_misc_properties(self):
""" Evaluate properties from the list of functions. """
for func in self.misc_prop_update_functions:
func.eval()
def add_misc_function(self, func):
""" Add a function to be performed when particles are updated
Parameters:
-----------
func -- The function to perform.
Example:
--------
The conduction coefficient required for the artificial heat
requires the velocity divergence at a particle. This must be
available at the start of every substep of an integration step.
"""
#calcs = operation.get_calcs(self, kernel)
self.misc_prop_update_functions.append(func)
def get_named_particle_array(self, name):
""" Return the named particle array if it exists """
has_array = False
for array in self.arrays:
if array.name == name:
arr = array
has_array = True
if has_array:
return arr
else:
print 'Array %s does not exist!' %(name)
def update_remote_particle_properties(self, props=None):
""" Perform a remote particle property update.
This function needs to be called when the remote particles
on one processor need to be updated on account of computations
on another physical processor.
"""
if self.in_parallel:
self.parallel_manager.update_remote_particle_properties(props=props)
def barrier(self):
""" Synchronize all processes """
if self.in_parallel:
self.parallel_manager.parallel_controller.comm.barrier()
def get_global_min_max(self, props):
""" Find the global minimum and maximum values.
Parameters:
-----------
props : dict
A dict of local properties for which we want global values.
"""
data_min = {}
data_max = {}
for prop in props:
data_min[prop] = props[prop]
data_max[prop] = props[prop]
pc = self.parallel_manager.parallel_controller
glb_min, glb_max = pc.get_glb_min_max(data_min, data_max)
return glb_min, glb_max
@classmethod
def get_neighbor_particle_locator(self, src, dst,
locator_type = SPHNeighborLocator,
variable_h=False, radius_scale=2.0):
""" Return a neighbor locator from the NNPSManager """
cell_manager = CellManager(arrays_to_bin=[src, dst])
nnps_manager = NNPSManager(cell_manager, locator_type=locator_type,
variable_h=variable_h)
return nnps_manager.get_neighbor_particle_locator(
src, dst, radius_scale)
class CLParticles(Particles):
    """ A collection of ParticleArrays for use with OpenCL.

    CLParticles is modelled very closely on `Particles` which is
    intended for Cython computations.

    Use CLParticles when using a CLCalc with OpenCL.

    Attributes:
    -----------

    arrays : list
        The list of arrays considered in the solution

    with_cl : bool {True}
        Flag indicating OpenCL is in use.

    domain_manager_type : int base.DomainManagerType
        A domain manager is used to spatially index the particles and provide
        an interface which is accesible and comprehensible to an appropriate
        OpenCLNeighborLocator object.

        Acceptable values are:

        (1) base.DomainManagerType.LinkedListManager : Indexing based on the
            linked list structure defined by Hockney and Eastwood.

        (2) base.DomainManagerType.DomainManager : No indexing. Intended to
            be used for all pair neighbor searches.

    cl_locator_type : int base.OpenCLNeighborLocatorType
        A neighbor locator is in cahoots with the DomainManager to provide
        near neighbors for a particle upon a query.

        Acceptable values are:

        (1) base.OpenCLNeighborLocatorType.LinkedListSPHNeighborLocator :
            A neighbor locator that uses the linked list structure of the
            LinkedListManager to provide neighbors in an SPH context. That
            is, nearest neighbors are particles in the 27 neighboring cells
            for the destination particle.

        (2) base.OpenCLNeighborLocatorType.AllPairNeighborLocator :
            A trivial locator that essentially returns all source particles
            as near neighbors for any query point.

    """
    def __init__(self, arrays,
                 domain_manager_type=CLDomain.DomainManager,
                 cl_locator_type=CLLocator.AllPairNeighborLocator):
        """ Constructor.

        Parameters:
        -----------
        arrays -- the ParticleArrays in the simulation.
        domain_manager_type -- see class docstring.
        cl_locator_type -- see class docstring.
        """
        self.arrays = arrays

        self.with_cl = True

        self.domain_manager_type = domain_manager_type
        self.cl_locator_type = cl_locator_type

        self.in_parallel = False

    def get_cl_precision(self):
        """Return the cl_precision used by the Particle Arrays.

        This property cannot be set. It is set at construction time for
        the Particle arrays. This is simply a convenience function to
        query the cl_precision.

        """
        # ensure that all arrays have the same precision
        narrays = len(self.arrays)
        if ( narrays > 1 ):
            for i in range(1, narrays):
                assert self.arrays[i-1].cl_precision == \
                       self.arrays[i].cl_precision

        return self.arrays[0].cl_precision

    def setup_cl(self, context):
        """ OpenCL setup given a context.

        Parameters:
        -----------

        context : pyopencl.Context
            The context is used to instantiate the domain manager, the
            type of which is determined from the attribute
            `domain_manager_type`.

        I expect this function to be called from the associated
        CLCalc, from within it's `setup_cl` method. The point is that
        the same context is used for the Calc, the DomainManager and
        the underlying ParticleArrays. This is important as a mix of
        contexts will result in crashes.

        The DomainManager is updated after creation. This means that
        the data is ready to be used by the SPHFunction OpenCL
        kernels.

        """
        self.with_cl = True
        self.context = context

        # create the domain manager.
        self.domain_manager = self.get_domain_manager(context)

        # Update the domain manager
        self.domain_manager.update()

    def get_domain_manager(self, context):
        """ Instantiate the domain manager from `domain_manager_type`.

        Raises ValueError for an unknown manager type.
        """
        # a proper if/elif chain so an unknown type always reaches the
        # error (the original's dangling `else` paired only with the
        # last `if`).
        if self.domain_manager_type == CLDomain.DomainManager:
            return domain_manager.DomainManager(
                arrays=self.arrays, context=context
                )
        elif self.domain_manager_type == CLDomain.LinkedListManager:
            return domain_manager.LinkedListManager(
                arrays=self.arrays, context=context
                )
        elif self.domain_manager_type == CLDomain.RadixSortManager:
            return domain_manager.RadixSortManager(
                arrays=self.arrays, context=context
                )
        else:
            msg = "Manager type %s not understood!"%(self.domain_manager_type)
            raise ValueError(msg)

    def get_neighbor_locator(self, source, dest, scale_fac=2.0):
        """ Return an OpenCLNeighborLocator between a source and
        destination.

        Parameters:
        -----------

        source : ParticleArray
            The source particle array

        dest : ParticleArray
            The destination particle array

        scale_fac : float
            NOTIMPLEMENTED. The scale facor to determine the effective
            cutoff radius.

        Note:
        -----

        An error is raised if a linked list (or radix sort) neighbor
        locator is requested with a mismatched domain manager, and a
        ValueError is raised for an unknown locator type (the original
        silently returned None).

        """
        if self.cl_locator_type == \
               CLLocator.AllPairNeighborLocator:
            return locator.AllPairNeighborLocator(source=source, dest=dest)

        if self.cl_locator_type == \
               CLLocator.LinkedListSPHNeighborLocator:
            if not self.domain_manager_type == \
                   CLDomain.LinkedListManager:
                raise RuntimeError(
                    "LinkedListSPHNeighborLocator requires a "
                    "LinkedListManager domain manager")

            return locator.LinkedListSPHNeighborLocator(
                manager=self.domain_manager, source=source, dest=dest,
                scale_fac=scale_fac)

        if self.cl_locator_type == \
               CLLocator.RadixSortNeighborLocator:
            if not self.domain_manager_type == \
                   CLDomain.RadixSortManager:
                raise RuntimeError(
                    "RadixSortNeighborLocator requires a "
                    "RadixSortManager domain manager")

            return locator.RadixSortNeighborLocator(
                manager=self.domain_manager, source=source, dest=dest,
                scale_fac=scale_fac)

        msg = "Locator type %s not understood!"%(self.cl_locator_type)
        raise ValueError(msg)

    def update(self):
        """ Update the spatial index of the particles.

        First check if the domain manager needs an update by calling
        it's update_status method and then proceed with the update.

        The reason this is done is to avoid any repeated updates.

        """
        self.domain_manager.update()

    def read_from_buffer(self):
        """ Read the buffer contents for all the arrays """
        for pa in self.arrays:
            pa.read_from_buffer()
###############################################################################
| Python |
"""API module to simplify import of common names from pysph.base package"""
# fast utils
from fast_utils import arange_long
# carray
from carray import LongArray, DoubleArray, IntArray, FloatArray
from cell import Cell, CellManager, PeriodicDomain
from kernels import KernelBase, DummyKernel, CubicSplineKernel, \
HarmonicKernel, GaussianKernel, M6SplineKernel, W8Kernel, W10Kernel,\
QuinticSplineKernel, WendlandQuinticSplineKernel, Poly6Kernel
from nnps import NbrParticleLocatorBase, FixedDestNbrParticleLocator, \
VarHNbrParticleLocator, NNPSManager, brute_force_nnps
from nnps import NeighborLocatorType
from particle_array import ParticleArray, get_particle_array
from particles import Particles, CLParticles
from point import Point, IntPoint
# ParticleTypes
from particle_types import ParticleType
# Convenience aliases so users can write e.g. base.Fluid instead of
# base.ParticleType.Fluid.
Fluid = ParticleType.Fluid
Solid = ParticleType.Solid
Boundary = ParticleType.Boundary
Probe = ParticleType.Probe
DummyFluid = ParticleType.DummyFluid
from geometry import MeshPoint, Line, Geometry
# LinkedListManager
from domain_manager import LinkedListManager, DomainManager, RadixSortManager, \
DomainManagerType
# OpenCL locator
from locator import OpenCLNeighborLocator, LinkedListSPHNeighborLocator, \
AllPairNeighborLocator, RadixSortNeighborLocator
from locator import OpenCLNeighborLocatorType
# radix sort
from radix_sort import AMDRadixSort
import nnps_util
| Python |
''' Implement infrastructure for the solver to add various interfaces '''
from functools import wraps, partial
import threading, thread
from pysph.base.particle_array import ParticleArray
import logging
logger = logging.getLogger()
class DummyComm(object):
    ''' Stand-in for an MPI communicator in serial runs.

    Mimics the small subset of the MPI.Comm interface the solver uses,
    behaving as a single-process world of rank 0.
    '''
    def Get_size(self):
        ''' A serial run has exactly one process. '''
        return 1

    def Get_rank(self):
        ''' The single serial process is always rank 0. '''
        return 0

    def send(self, data, pid):
        ''' Stash *data* for a matching recv() to pick up. '''
        self.data = data

    def recv(self, pid):
        ''' Return (and forget) the data stored by the last send(). '''
        try:
            return self.data
        finally:
            del self.data

    def bcast(self, data):
        ''' Broadcasting to a single process is the identity. '''
        return data

    def gather(self, data):
        ''' Gathering from a single process yields a one-element list. '''
        return [data]
def synchronized(lock_or_func):
    ''' Decorator for a synchronized (thread safe) function.

    Two call styles are supported:

    - sync_func = synchronized(lock)(func)  # serialize on an existing lock
    - sync_func = synchronized(func)        # serialize on a new private lock
    '''
    if not isinstance(lock_or_func, thread.LockType):
        # called directly with a function: allocate a fresh lock and recurse.
        return synchronized(threading.Lock())(lock_or_func)

    lock = lock_or_func
    def decorator(func):
        @wraps(func)
        def locked_call(*args, **kwargs):
            with lock:
                return func(*args, **kwargs)
        return locked_call
    return decorator
def wrap_dispatcher(obj, meth, *args2, **kwargs2):
    ''' Bind *meth* so later calls dispatch with ``obj.block`` prepended.

    Positional arguments supplied here are placed before the call-time
    positionals; keyword arguments supplied here act as overridable
    defaults.  ``obj.block`` is read at call time, not at wrap time.
    '''
    @wraps(meth)
    def dispatcher(*args, **kwargs):
        merged = dict(kwargs2, **kwargs)
        return meth(obj.block, *(args2 + args), **merged)
    return dispatcher
class Controller(object):
    ''' Controller class acts as a proxy to control the solver

    This is passed as an argument to the interface

    **Methods available**:

    - get -- get the value of a solver parameter
    - set -- set the value of a solver parameter
    - get_result -- return result of a queued command
    - pause_on_next -- pause solver thread on next iteration
    - wait -- wait (block) calling thread till solver is paused
              (call after `pause_on_next`)
    - cont -- continue solver thread (call after `pause_on_next`)

    Various other methods are also available as listed in
    :data:`CommandManager.dispatch_dict` which perform different functions.

    - The methods in CommandManager.active_methods do their operation and return
      the result (if any) immediately
    - The methods in CommandManager.lazy_methods do their work later when the
      solver thread is available and return a task-id. The result of the task
      can be obtained later using the blocking call `get_result()` which waits
      till the result is available and returns it.
      The availability of the result can be checked using the lock returned
      by `get_task_lock()` method

    FIXME: wait/cont currently do not work in parallel
    '''
    def __init__(self, command_manager, block=True):
        """Create a proxy bound to *command_manager*.

        block -- initial blocking mode (see set_blocking).
        """
        super(Controller, self).__init__()
        # name-mangled so the dynamically injected dispatch methods
        # (see _set_methods) cannot collide with it.
        self.__command_manager = command_manager
        # NOTE(review): Controller is not a Thread subclass; 'daemon'
        # looks vestigial -- confirm before removing.
        self.daemon = True
        self.block = block
        self._set_methods()

    def _set_methods(self):
        # inject get_<prop>/set_<prop> accessors and one wrapper per
        # registered CommandManager method; each wrapper routes through
        # CommandManager.dispatch with the current blocking mode.
        for prop in self.__command_manager.solver_props:
            setattr(self, 'get_'+prop, wrap_dispatcher(self, self.__command_manager.dispatch, 'get', prop))
            setattr(self, 'set_'+prop, wrap_dispatcher(self, self.__command_manager.dispatch, 'set', prop))
        for meth in self.__command_manager.solver_methods:
            setattr(self, meth, wrap_dispatcher(self, self.__command_manager.dispatch, meth))
        for meth in self.__command_manager.lazy_methods:
            setattr(self, meth, wrap_dispatcher(self, self.__command_manager.dispatch, meth))
        for meth in self.__command_manager.active_methods:
            setattr(self, meth, wrap_dispatcher(self, self.__command_manager.dispatch, meth))

    def get(self, name):
        ''' get a solver property; returns immediately '''
        return self.__command_manager.dispatch(self.block, 'get', name)

    def set(self, name, value):
        ''' set a solver property; returns immediately; '''
        return self.__command_manager.dispatch(self.block, 'set', name, value)

    def pause_on_next(self):
        ''' pause the solver thread on next iteration '''
        return self.__command_manager.pause_on_next()

    def wait(self):
        ''' block the calling thread until the solver thread pauses

        call this only after calling the `pause_on_next` method to tell
        the controller to pause the solver thread'''
        self.__command_manager.wait()
        return True

    def get_prop_names(self):
        ''' return the list of valid solver property names '''
        return list(self.__command_manager.solver_props)

    def cont(self):
        ''' continue solver thread after it has been paused by `pause_on_next`

        call this only after calling the `pause_on_next` method '''
        return self.__command_manager.cont()

    def get_result(self, task_id):
        ''' get the result of a previously queued command '''
        return self.__command_manager.get_result(task_id)

    def set_blocking(self, block):
        ''' set the blocking mode to True/False

        In blocking mode (block=True) all methods other than getting of
        solver properties block until the command is executed by the solver
        and return the results. The blocking time can vary depending on the
        time taken by solver per iteration and the command_interval

        In non-blocking mode, these methods queue the command for later
        and return a string corresponding to the task_id of the operation.
        The result can be later obtained by a (blocking) call to get_result
        with the task_id as argument
        '''
        if block != self.block:
            self.block = block
            # re-inject the wrappers so they pick up the new mode.
            self._set_methods()
        return self.block

    def get_blocking(self):
        ''' get the blocking mode ( True/False ) '''
        return self.block

    def ping(self):
        ''' liveness check; always returns True '''
        return True
def on_root_proc(f):
    ''' Run the decorated method only on the root (rank 0) process.

    On any other rank the call is a no-op returning None.
    '''
    @wraps(f)
    def root_only(self, *args, **kwds):
        if self.comm.Get_rank() != 0:
            return None
        return f(self, *args, **kwds)
    return root_only
def in_parallel(f):
    ''' Run the decorated method on every process and gather the results.

    The return value is the list produced by ``self.comm.gather``.
    '''
    @wraps(f)
    def gathered(self, *args, **kwds):
        result = f(self, *args, **kwds)
        return self.comm.gather(result)
    return gathered
class CommandManager(object):
    ''' Class to manage and synchronize commands from various Controllers

    Command names fall into four groups, used both by the dispatch table
    below and by Controller._set_methods:

    - solver_props: names usable with get/set
    - solver_methods: forwarded to the solver object
    - lazy_methods: queued and run on the solver thread
    - active_methods: run immediately on the calling thread
    '''
    solver_props = set(('t', 'tf', 'dt', 'count', 'pfreq', 'fname',
                'detailed_output', 'output_directory', 'command_interval'))
    solver_methods = set(('dump_output',))
    lazy_methods = set(('get_particle_array_names', 'get_named_particle_array',
                        'get_particle_array_combined', 'get_particle_array_from_procs'))
    active_methods = set(('get_status', 'get_task_lock', 'set_log_level'))

    def __init__(self, solver, comm=None):
        """Bind to *solver*; fall back to a DummyComm in serial runs."""
        if comm is not None:
            self.comm = comm
        else:
            try:
                # parallel runs expose the MPI comm through the cell manager.
                self.comm = solver.particles.cell_manager.parallel_controller.comm
            except AttributeError:
                self.comm = DummyComm()
        logger.info('CommandManager: using comm: %s'%self.comm)
        self.solver = solver
        self.interfaces = []
        self.func_dict = {}
        self.rlock = threading.RLock()
        self.res_lock = threading.Lock()     # guards self.results
        self.plock = threading.Condition()   # pause/continue signalling
        self.qlock = threading.Condition() # queue lock
        self.queue = []            # task ids in execution order
        self.queue_dict = {}       # task id -> (meth, args, kwargs)
        self.queue_lock_map = {}   # task id -> per-task Lock
        self.results = {}          # task id -> result
        self.pause = set([])       # idents of threads requesting a pause

    @on_root_proc
    def add_interface(self, callable, block=True):
        ''' Add a callable interface to the controller

        The callable must accept an Controller instance argument.
        The callable is called in a new thread of its own and it can
        do various actions with methods defined on the Controller
        instance passed to it
        The new created thread is set to daemon mode and returned
        '''
        logger.info('adding_interface: %s'%callable)
        control = Controller(self, block)
        thr = threading.Thread(target=callable, args=(control,))
        thr.daemon = True
        thr.start()
        return thr

    def add_function(self, callable, interval=1):
        ''' add a function to to be called every `interval` iterations '''
        l = self.func_dict[interval] = self.func_dict.get(interval, [])
        l.append(callable)

    def execute_commands(self, solver):
        ''' called by the solver after each timestep '''
        # TODO: first synchronize all the controllers in different processes
        # using mpi
        self.sync_commands()
        with self.qlock:
            self.run_queued_commands()
        logger.info('control handler: count=%d'%solver.count)
        for interval in self.func_dict:
            if solver.count%interval == 0:
                for func in self.func_dict[interval]:
                    func(solver)
        self.wait_for_cmd()

    def wait_for_cmd(self):
        ''' wait for command from any interface '''
        with self.qlock:
            # stay parked while any controller holds a pause request;
            # cont() notifies qlock to wake us up.
            while self.pause:
                with self.plock:
                    self.plock.notify_all()
                self.qlock.wait()
                self.run_queued_commands()

    def sync_commands(self):
        ''' send the pending commands to all the procs in parallel run '''
        self.queue_dict, self.queue, self.pause = self.comm.bcast((self.queue_dict, self.queue, self.pause))

    def run_queued_commands(self):
        # drain the queue in FIFO order; releasing the per-task lock
        # (root proc only) signals the waiting get_result() caller.
        while self.queue:
            lock_id = self.queue.pop(0)
            meth, args, kwargs = self.queue_dict[lock_id]
            with self.res_lock:
                try:
                    self.results[lock_id] = self.run_command(meth, args, kwargs)
                finally:
                    del self.queue_dict[lock_id]
            if self.comm.Get_rank()==0:
                self.queue_lock_map[lock_id].release()

    def run_command(self, cmd, args=[], kwargs={}):
        # NOTE(review): mutable default args are safe here only because
        # args/kwargs are never mutated in this method.
        res = self.dispatch_dict[cmd](self, *args, **kwargs)
        logger.info('controller: running_command: %s %s %s %s'%(
                cmd, args, kwargs, res))
        return res

    def pause_on_next(self):
        ''' pause and wait for command on the next control interval '''
        if self.comm.Get_size() > 1:
            logger.info('pause/continue noy yet supported in parallel runs')
            return False
        with self.plock:
            self.pause.add(threading.current_thread().ident)
            self.plock.notify()
        return True

    def wait(self):
        # block the caller until the solver thread signals via plock.
        with self.plock:
            self.plock.wait()

    def cont(self):
        ''' continue after a pause command '''
        if self.comm.Get_size() > 1:
            logger.info('pause/continue noy yet supported in parallel runs')
            return
        with self.plock:
            self.pause.remove(threading.current_thread().ident)
            self.plock.notify()
        with self.qlock:
            self.qlock.notify_all()

    def get_result(self, lock_id):
        ''' get the result of a previously queued command '''
        lock_id = int(lock_id)
        lock = self.queue_lock_map[lock_id]
        # acquiring the per-task lock blocks until run_queued_commands
        # has released it, i.e. until the result is available.
        with lock:
            with self.res_lock:
                ret = self.results[lock_id]
                del self.results[lock_id]
        del self.queue_lock_map[lock_id]
        return ret

    def get_task_lock(self, lock_id):
        ''' get the Lock instance associated with a command '''
        return self.queue_lock_map[int(lock_id)]

    def get_prop(self, name):
        ''' get a solver property '''
        return getattr(self.solver, name)

    def set_prop(self, name, value):
        ''' set a solver property '''
        return setattr(self.solver, name, value)

    def solver_method(self, name, *args, **kwargs):
        ''' execute a method on the solver '''
        ret = getattr(self.solver, name)(*args, **kwargs)
        ret = self.comm.gather(ret)
        return ret

    def get_particle_array_names(self):
        ''' get the names of the particle arrays '''
        return [pa.name for pa in self.solver.particles.arrays]

    def get_named_particle_array(self, name, props=None):
        ''' return the named array (or just the listed props of it) '''
        for pa in self.solver.particles.arrays:
            if pa.name == name:
                if props:
                    return [getattr(pa, p) for p in props if hasattr(pa, p)]
                else:
                    return pa

    def get_particle_array_index(self, name):
        ''' get the index of the named particle array '''
        for i,pa in enumerate(self.solver.particles.arrays):
            if pa.name == name:
                return i

    def get_particle_array_from_procs(self, idx, procs=None):
        ''' get particle array at index from all processes

        specifying processes is currently not implemented
        '''
        if procs is None:
            procs = range(self.comm.size)
        pa = self.solver.particles.arrays[idx]
        pas = self.comm.gather(pa)
        return pas

    def get_particle_array_combined(self, idx, procs=None):
        ''' get a single particle array with combined data from all procs

        specifying processes is currently not implemented
        '''
        if procs is None:
            procs = range(self.comm.size)
        pa = self.solver.particles.arrays[idx]
        pas = self.comm.gather(pa)
        pa = ParticleArray(name=pa.name)
        for p in pas:
            pa.append_parray(p)
        return pa

    def get_status(self):
        ''' get the status of the controller '''
        return 'commands queued: %d'%len(self.queue)

    def set_log_level(self, level):
        ''' set the logging level '''
        logger.setLevel(level)

    # Build the dispatch table at class-creation time: 'get'/'set' map to
    # the property accessors and each registered method name maps to the
    # function object defined above (looked up through locals()).
    dispatch_dict = {'get':get_prop, 'set':set_prop}

    for meth in solver_methods:
        dispatch_dict[meth] = solver_method

    for meth in lazy_methods:
        dispatch_dict[meth] = locals()[meth]

    for meth in active_methods:
        dispatch_dict[meth] = locals()[meth]

    @synchronized
    def dispatch(self, block, meth, *args, **kwargs):
        ''' execute/queue a command with specified arguments '''
        if meth in self.dispatch_dict:
            if meth=='get' or meth=='set':
                prop = args[0]
                if prop not in self.solver_props:
                    raise RuntimeError('Invalid dispatch on method: %s with '
                                       'non-existant property: %s '%(meth,prop))
            # gets and active methods always run right away; anything else
            # runs immediately only in blocking mode.
            if block or meth=='get' or meth in self.active_methods:
                logger.info('controller: immediate dispatch(): %s %s %s'%(
                        meth, args, kwargs))
                return self.dispatch_dict[meth](self, *args, **kwargs)
            else:
                # non-blocking: park the command on the queue; the task id
                # is the id() of a pre-acquired lock the caller can wait on.
                lock = threading.Lock()
                lock.acquire()
                lock_id = id(lock)
                with self.qlock:
                    self.queue_lock_map[lock_id] = lock
                    self.queue_dict[lock_id] = (meth, args, kwargs)
                    self.queue.append(lock_id)
                logger.info('controller: dispatch(%d): %s %s %s'%(
                        lock_id, meth, args, kwargs))
                return str(lock_id)
        else:
            raise RuntimeError('Invalid dispatch on method: '+meth)
| Python |
"""
Module contains some common functions.
"""
# standard imports
import pickle
import numpy
import sys
import os
import platform
import commands
from numpy.lib import format
# progressbar is optional; degrade gracefully when it is not installed.
HAS_PBAR = True
try:
    import progressbar
except ImportError:
    HAS_PBAR = False
import pysph
def check_array(x, y):
    """Return True when *x* and *y* agree element-wise to within an
    absolute tolerance of 1e-16 (no relative tolerance)."""
    return numpy.allclose(x, y, rtol=0, atol=1e-16)
def zipfile_factory(*args, **kwargs):
    """Return a zipfile.ZipFile built from *args*/*kwargs*, forcing the
    ZIP64 extension on whenever the running Python supports it."""
    import zipfile
    if sys.version_info >= (2, 5):
        # allowZip64 lets archives grow past the 2 GB classic limit.
        kwargs['allowZip64'] = True
    return zipfile.ZipFile(*args, **kwargs)
def savez(file, *args, **kwds):
    """
    Save several arrays into a single file in uncompressed ``.npz`` format.

    Arrays passed positionally are stored under automatically generated
    names ``arr_0``, ``arr_1``, ...; arrays passed as keyword arguments
    are stored under their keyword names.

    Parameters
    ----------
    file : str or file
        Target file name or open file-like object.  When a string is
        given, a ``.npz`` extension is appended if not already present.
    *args : Arguments, optional
        Arrays saved under the generated ``arr_N`` names.
    **kwds : Keyword arguments, optional
        Arrays saved under the given keyword names.

    Returns
    -------
    None

    Notes
    -----
    The ``.npz`` format is an uncompressed zip archive containing one
    ``.npy`` file per array; loading it with `numpy.load` yields a
    dictionary-like ``NpzFile``.

    See Also
    --------
    savez_compressed : same, but with zip (deflate) compression.
    numpy.save, numpy.savetxt
    """
    _savez(file, args, kwds, False)
def savez_compressed(file, *args, **kwds):
    """
    Save several arrays into a single compressed ``.npz`` archive.

    Positional arrays are stored under the automatic names ``arr_0``,
    ``arr_1``, and so on; keyword arguments are stored under their
    keyword names.

    Parameters
    ----------
    file : string
        File name of .npz file.
    args : Arguments
        Function arguments.
    kwds : Keyword arguments
        Keywords.

    See Also
    --------
    numpy.savez : Save several arrays into an uncompressed .npz file format
    """
    # Delegate to the shared writer with compression enabled.
    _savez(file, args, kwds, True)
def _savez(file, args, kwds, compress):
    """Shared implementation for ``savez`` and ``savez_compressed``.

    Each array is staged in a temporary ``.npy`` file on disk and then
    added to a zip archive named *file*. A ``.npz`` extension is appended
    to string file names when missing.

    Parameters
    ----------
    file : str or file-like
        Target archive.
    args : tuple
        Positional arrays, stored as arr_0, arr_1, ...
    kwds : dict
        Keyword arrays, stored under their keyword names.
    compress : bool
        Use ZIP_DEFLATED when True, ZIP_STORED otherwise.

    Raises
    ------
    ValueError
        If a positional array's automatic name collides with a keyword.
    """
    # Import is postponed to here since zipfile depends on gzip, an optional
    # component of the so-called standard library.
    import zipfile
    # Import deferred for startup time improvement
    import tempfile

    if isinstance(file, basestring):
        if not file.endswith('.npz'):
            file = file + '.npz'

    namedict = kwds
    for i, val in enumerate(args):
        key = 'arr_%d' % i
        # membership test directly on the dict (no .keys() list needed)
        if key in namedict:
            msg = "Cannot use un-named variables and keyword %s" % key
            # raise with call syntax (works on both Python 2 and 3; the
            # original `raise ValueError, msg` is Python-2-only syntax)
            raise ValueError(msg)
        namedict[key] = val

    if compress:
        compression = zipfile.ZIP_DEFLATED
    else:
        compression = zipfile.ZIP_STORED

    # named 'zipf' so the builtin zip() is not shadowed
    zipf = zipfile_factory(file, mode="w", compression=compression)

    # Stage arrays in a temporary file on disk, before writing to zip.
    fd, tmpfile = tempfile.mkstemp(suffix='-numpy.npy')
    os.close(fd)
    try:
        # .items() works identically here on Python 2 and 3
        for key, val in namedict.items():
            fname = key + '.npy'
            fid = open(tmpfile, 'wb')
            try:
                format.write_array(fid, numpy.asanyarray(val))
                fid.close()
                fid = None
                zipf.write(tmpfile, arcname=fname)
            finally:
                # close the staging file if write_array raised mid-way
                if fid:
                    fid.close()
    finally:
        os.remove(tmpfile)

    zipf.close()
#############################################################################
def get_distributed_particles(pa, comm, cell_size):
    """Distribute the particle array *pa* over all MPI ranks and return
    this rank's share.

    Rank 0 runs the load balancer to split the particles into one list
    per processor and scatters the result; every other rank passes None
    and simply receives its portion.
    """
    # FIXME: this can be removed once the examples all use Application.
    from pysph.parallel.load_balancer import LoadBalancer

    num_procs = comm.Get_size()

    if comm.Get_rank() == 0:
        lb = LoadBalancer.distribute_particles(pa, num_procs=num_procs,
                                               block_size=cell_size)
    else:
        lb = None

    return comm.scatter(lb, root=0)
################################################################################
# `PBar` class.
###############################################################################
class PBar(object):
    """A simple wrapper around the progressbar so it works if a user has
    it installed or not.

    When the third-party ``progressbar`` package is available (HAS_PBAR)
    and *show* is True, a real progress bar is drawn; otherwise a plain
    percentage is written to stderr.
    """
    def __init__(self, maxval, show=True):
        self.count = 0
        self.maxval = maxval
        self.show = show
        self.bar = None
        if HAS_PBAR and show:
            widgets = [progressbar.Percentage(), ' ', progressbar.Bar(),
                       progressbar.ETA()]
            self.bar = progressbar.ProgressBar(widgets=widgets,
                                               maxval=maxval).start()

    def update(self):
        """Advance the counter by one and redraw the progress display."""
        self.count += 1
        if self.bar is None:
            if self.show:
                # manual fallback: overwrite the current line with a percentage
                sys.stderr.write('\r%d%%' % int(self.count*100/self.maxval))
                sys.stderr.flush()
        else:
            self.bar.update(self.count)

    def finish(self):
        """Finalize the progress display at 100%."""
        if self.bar is None:
            if self.show:
                sys.stderr.write('\r100%\n')
                sys.stderr.flush()
        else:
            self.bar.finish()
##############################################################################
# friendly mkdir from http://code.activestate.com/recipes/82465/.
##############################################################################
def mkdir(newdir):
    """works the way a good mkdir should :)

    - already exists, silently complete
    - regular file in the way, raise an exception
    - parent directory(ies) does not exist, make them as well
    """
    # guard clauses: nothing to do, or a file is squatting on the name
    if os.path.isdir(newdir):
        return
    if os.path.isfile(newdir):
        raise OSError("a file with the same name as the desired " \
                      "dir, '%s', already exists." % newdir)

    # create missing ancestors first, then the leaf itself
    head, tail = os.path.split(newdir)
    if head and not os.path.isdir(head):
        mkdir(head)
    if tail:
        try:
            os.mkdir(newdir)
        # To prevent race in mpi runs
        except OSError as e:
            import errno
            # another rank may have created it between our check and mkdir
            if not (e.errno == errno.EEXIST and os.path.isdir(newdir)):
                raise
##############################################################################
# read pickled data from a file
##############################################################################
def get_pickled_data(fname):
    """Return the object unpickled from the file *fname*.

    The file is opened in binary mode ('rb'): pickle streams are binary
    data, and the original text-mode 'r' corrupts them on platforms that
    translate newlines and for pickle protocols > 0. The context manager
    also guarantees the file is closed if pickle.load raises.
    """
    with open(fname, 'rb') as f:
        return pickle.load(f)
def get_pysph_root():
    """Return the directory containing the installed pysph package."""
    # dirname(p) is defined as os.path.split(p)[0], so this is equivalent
    return os.path.dirname(pysph.__file__)
##############################################################################
# Load an output file
##############################################################################
def load(fname):
    """ Load and return data from an output (.npz) file dumped by PySPH.

    For output file version 1, the function returns a dictionary with
    the keys:

    solver_data : Solver constants at the time of output like time,
    time step and iteration count.

    arrays : ParticleArrays keyed on names with the ParticleArray
    object as value.

    Raises
    ------
    RuntimeError
        If the file records no version number, or an unknown version.
    """
    from pysph.base.particle_array import get_particle_array

    data = numpy.load(fname)

    ret = {"arrays": {}}

    if 'version' not in data.files:
        # fixed typo in the user-facing message ("nnumber" -> "number")
        msg = "Wrong file type! No version number recorded."
        raise RuntimeError(msg)

    version = data['version']

    if version == 1:
        # The "arrays" and "solver_data" entries are python dicts stored
        # as 0-d object arrays; reshape to (1,) and index to recover them.
        arrays = data["arrays"]
        arrays.shape = (1,)
        arrays = arrays[0]

        solver_data = data["solver_data"]
        solver_data.shape = (1,)
        solver_data = solver_data[0]

        for array_name in arrays:
            # rebuild a ParticleArray from the saved property arrays
            array = get_particle_array(name=array_name,
                                       cl_precision="single",
                                       **arrays[array_name])
            ret["arrays"][array_name] = array

        ret["solver_data"] = solver_data
    else:
        raise RuntimeError("Version not understood!")

    return ret
def load_and_concatenate(prefix,nprocs=1,directory=".",count=None):
"""Load the results from multiple files.
Given a filename prefix and the number of processors, return a
concatenated version of the dictionary returned via load.
Parameters:
-----------
prefix : str
A filename prefix for the output file.
nprocs : int
The number of processors (files) to read
directory : str
The directory for the files
count : int
The file iteration count to read. If None, the last available
one is read
"""
if count is None:
counts = [i.rsplit('_',1)[1][:-4] for i in os.listdir(directory) if i.startswith(prefix) and i.endswith('.npz')]
counts = sorted( [int(i) for i in counts] )
count = counts[-1]
arrays_by_rank = {}
for rank in range(nprocs):
fname = os.path.join(directory, prefix+'_'+str(rank)+'_'+str(count)+'.npz')
data = load(fname)
arrays_by_rank[rank] = data["arrays"]
arrays = _concatenate_arrays(arrays_by_rank, nprocs)
data["arrays"] = arrays
return data
def _concatenate_arrays(arrays_by_rank, nprocs):
"""Concatenate arrays into one single particle array. """
if nprocs <= 0:
return 0
array_names = arrays_by_rank[0].keys()
first_processors_arrays = arrays_by_rank[0]
if nprocs > 1:
ret = {}
for array_name in array_names:
first_array = first_processors_arrays[array_name]
for rank in range(1,nprocs):
other_processors_arrays = arrays_by_rank[rank]
other_array = other_processors_arrays[array_name]
# append the other array to the first array
first_array.append_parray(other_array)
# remove the non local particles
first_array.remove_tagged_particles(1)
ret[array_name] = first_array
else:
ret = arrays_by_rank[0]
return ret
| Python |
""" An example solver for the circular patch of fluid """
import numpy
from optparse import OptionGroup, Option
import pysph.base.api as base
import pysph.sph.api as sph
from pysph.sph.funcs import stress_funcs
from pysph.sph.funcs import eos_funcs
from pysph.sph.funcs import viscosity_funcs
from solver import Solver
from post_step_functions import CFLTimeStepFunction
from sph_equation import SPHOperation, SPHIntegration
from pysph.sph.funcs.arithmetic_funcs import PropertyGet, PropertyAdd
from pysph.sph.funcs.basic_funcs import KernelSum
# Short aliases for the particle type tags used throughout this module.
Fluids = base.ParticleType.Fluid
Solids = base.ParticleType.Solid
def get_particle_array(xsph=True, mart_stress=True, **kwargs):
    """Return a solid ParticleArray extended with stress-solver properties.

    Adds the upper-triangular components of the symmetric stress tensor
    (sigma00 .. sigma22) and, optionally, the XSPH velocity properties
    and Monaghan artificial stress components.
    """
    kwargs.setdefault('type', 1)
    kwargs.setdefault('name', 'solid')
    pa = base.get_particle_array(**kwargs)

    # j <= i: upper-triangular components of the 3x3 symmetric tensor
    for i in range(3):
        for j in range(i + 1):
            pa.add_property(dict(name='sigma%d%d' % (j, i)))

    if xsph:
        for prop_name in ('ubar', 'vbar', 'wbar'):
            pa.add_property(dict(name=prop_name))

    if mart_stress:
        for i in range(3):
            for j in range(i + 1):
                pa.add_property(dict(name='MArtStress%d%d' % (j, i)))

    return pa
def get_circular_patch(name="", type=1, dx=0.25):
x,y = numpy.mgrid[-1.05:1.05+1e-4:dx, -1.05:1.05+1e-4:dx]
x = x.ravel()
y = y.ravel()
m = numpy.ones_like(x)*dx*dx
h = numpy.ones_like(x)*2*dx
rho = numpy.ones_like(x)
z = 1-rho
p = 0.5*1.0*100*100*(1 - (x**2 + y**2))
cs = numpy.ones_like(x) * 100.0
u = -100*x
v = 100*y
indices = []
for i in range(len(x)):
if numpy.sqrt(x[i]*x[i] + y[i]*y[i]) - 1 > 1e-10:
indices.append(i)
pa = base.get_particle_array(x=x, y=y, m=m, rho=rho, h=h, p=p, u=u, v=v,
cs=cs,name=name, type=type,
sigma00=z, sigma11=z, sigma22=z,
sigma01=z, sigma12=z, sigma02=z)
pa.constants['E'] = 1e9
pa.constants['nu'] = 0.3
pa.constants['G'] = pa.constants['E']/(2.0*1+pa.constants['nu'])
pa.constants['K'] = stress_funcs.get_K(pa.constants['G'], pa.constants['nu'])
pa.constants['rho0'] = 1.0
pa.constants['c_s'] = numpy.sqrt(pa.constants['K']/pa.constants['rho0'])
la = base.LongArray(len(indices))
la.set_data(numpy.array(indices))
pa.remove_particles(la)
pa.set(idx=numpy.arange(len(pa.x)))
print 'Number of particles: ', len(pa.x)
return pa
class StressSolver(Solver):
    """Solver for elastic stress problems using the deviatoric stress
    SPH formulation with optional XSPH, Monaghan artificial stress and
    artificial viscosity corrections."""

    def __init__(self, dim, integrator_type, xsph=0.5, marts_eps=0.3, marts_n=4,
                 CFL=None, martv_alpha=1.0, martv_beta=1.0,
                 co=None, ro=None):
        ''' constructor

        Parameters
        ----------
        xsph : float
            correction factor for xsph (0=disabled, default=0.5)
        marts_eps : float
            correction factor epsilon for Monaghan's artificial stress term
            (0=disabled, default=0.3)
        marts_n : float
            correction factor kernel exponent for Monaghan's
            artificial stress term
        martv_alpha : float
            Monaghan artificial viscosity alpha (default=1.0)
        martv_beta : float
            Monaghan artificial viscosity beta (default=1.0)
        CFL : float or None
            the CFL number if time-step is to be based on CFL (use < 0.3)
        co, ro : float or None
            reference sound speed and reference density

        dim, integrator_type : see :py:meth:`Solver.__init__`
        '''
        # The defaults dict doubles as the source of command-line
        # option defaults in get_options/setup_solver.
        self.defaults = dict(xsph=xsph,
                             marts_eps=marts_eps,
                             marts_n=marts_n,
                             martv_alpha=martv_alpha,
                             martv_beta=martv_beta,
                             cfl=CFL,
                             co=co,
                             ro=ro
                             )

        Solver.__init__(self, dim, integrator_type)

    def initialize(self):
        """Extend base initialization: also print the stress tensor and
        the Monaghan artificial stress components in the output files."""
        Solver.initialize(self)
        self.print_properties.append('sigma00')
        self.print_properties.extend(['sigma01', 'sigma11'])
        self.print_properties.extend(['sigma02', 'sigma12', 'sigma22'])

        self.print_properties.append('MArtStress00')
        self.print_properties.extend(['MArtStress01', 'MArtStress11'])
        self.print_properties.extend(['MArtStress02', 'MArtStress12', 'MArtStress22'])

    def get_options(self, opt_parser):
        """Return (OptionGroup, Option): the stress-solver option group
        and the separate --cfl option."""
        opt = OptionGroup(opt_parser, "Stress Solver Options")
        opt.add_option('--xsph', action='store', type='float',
                       dest='xsph', default=self.defaults['xsph'],
                       help='set the XSPH correction weight factor (default=0.5)')
        opt.add_option('--marts_eps', dest='marts_eps', type='float',
                       default=self.defaults['marts_eps'],
                       help='set the Monaghan artificial stress weight factor (0.3)')
        opt.add_option('--marts_n', dest='marts_n', type='float',
                       default=self.defaults['marts_n'],
                       help='set the Monaghan artificial stress exponent (4)')
        opt.add_option('--martv_alpha', dest='martv_alpha', type='float',
                       default=self.defaults['martv_alpha'],
                       help='set the Monaghan artificial viscosity alpha (1)')
        opt.add_option('--martv_beta', dest='martv_beta', type='float',
                       default=self.defaults['martv_beta'],
                       help='set the Monaghan artificial viscosity beta (1)')
        opt.add_option('--co', dest="co", type="float",
                       default=self.defaults["co"],
                       help="Set the reference sound speed c0 ")
        opt.add_option("--ro", dest="ro", type="float",
                       default=self.defaults["ro"],
                       help="Set the reference density r0")

        cfl_opt = Option('--cfl', dest='cfl', type='float',
                         default=self.defaults['cfl'],
                         help='set the cfl number for determining the timestep '
                              'of simulation')

        return opt, cfl_opt

    def setup_solver(self, options=None):
        """Register the SPH operations for the stress solver.

        *options* is a dict (or an optparse values object exposing .get)
        overriding the constructor defaults.
        """
        options = options or self.defaults
        xsph = options.get('xsph')
        marts_eps = options.get('marts_eps')
        marts_n = options.get('marts_n')
        martv_alpha = options.get('martv_alpha')
        martv_beta = options.get('martv_beta')
        cfl = options.get('cfl')
        co = options.get("co")
        ro = options.get("ro")

        # Add the operations

        # Equation of state
        self.add_operation(SPHOperation(

            stress_funcs.BulkModulusPEqn,
            on_types=[Solids],
            updates=['p'],
            id='eos')

                           )

        # Monaghan Artificial Stress (disabled when marts_eps is falsy)
        if marts_eps:
            self.add_operation(SPHOperation(

                stress_funcs.MonaghanArtStressD.withargs(eps=marts_eps),
                on_types=[Solids],
                updates=['MArtStress00','MArtStress11','MArtStress22'],
                id='mart_stress_d')

                               )

            self.add_operation(SPHOperation(

                stress_funcs.MonaghanArtStressS.withargs(eps=marts_eps),
                on_types=[Solids],
                updates=['MArtStress12','MArtStress02','MArtStress01'],
                id='mart_stress_s')

                               )

            self.add_operation(SPHIntegration(

                stress_funcs.MonaghanArtStressAcc.withargs(n=marts_n),
                from_types=[Fluids, Solids], on_types=[Solids],
                updates=['u','v','w'],
                id='mart_stressacc')

                               )

        # Density Rate
        self.add_operation(SPHIntegration(

            sph.SPHDensityRate.withargs(hks=False),
            from_types=[Solids], on_types=[Solids],
            updates=['rho'],
            id='density')

                           )

        # Momentum Equation. Deviatoric stress component
        self.add_operation(SPHIntegration(

            stress_funcs.SimpleStressAcceleration,
            from_types=[Fluids, Solids], on_types=[Solids],
            updates=['u','v','w'],
            id='stressacc')

                           )

        # Momentum equation. Symmetric component.
        self.add_operation(SPHIntegration(

            stress_funcs.PressureAcceleration.withargs(alpha=martv_alpha,
                                                       beta=martv_beta,
                                                       eta=0.0),
            from_types=[Fluids, Solids], on_types=[Solids],
            updates=['u','v','w'],
            id='pacc')

                           )

        # XSPH correction (disabled when xsph is falsy)
        if xsph:
            self.add_operation(SPHIntegration(

                sph.XSPHCorrection.withargs(eps=xsph, hks=False),
                from_types=[Solids], on_types=[Solids],
                updates=['u','v','w'],
                id='xsph')

                               )

        # Deviatoric stress rate (diagonal components)
        self.add_operation(SPHIntegration(

            stress_funcs.StressRateD.withargs(xsph=bool(xsph)),
            from_types=[Fluids, Solids], on_types=[Solids],
            updates=['sigma00','sigma11','sigma22'],
            id='stressD')

                           )

        # Deviatoric stress rate (off-diagonal components)
        self.add_operation(SPHIntegration(

            stress_funcs.StressRateS.withargs(xsph=bool(xsph)),
            from_types=[Fluids, Solids], on_types=[Solids],
            updates=['sigma12','sigma02','sigma01'],
            id='stressS')

                           )

        # Position Stepping
        self.add_operation(SPHIntegration(

            sph.PositionStepping,
            on_types=[Solids],
            updates=['x','y','z'],
            id='pos')

                           )

        # Time step function
        if cfl:
            self.pre_step_functions.append(CFLTimeStepFunction(cfl))
#############################################################################
| Python |
from pysph.sph.sph_calc import SPHCalc
from pysph.sph.funcs.arithmetic_funcs import PropertyGet
import numpy
import logging
logger = logging.getLogger()
#############################################################################
#`Integrator` class
#############################################################################
class Integrator(object):
    """ The base class for all integrators. Currently, the following
    integrators are supported:

    (a) Forward Euler Integrator
    (b) RK2 Integrator
    (c) RK4 Integrator
    (d) Predictor Corrector Integrator
    (e) Leap Frog Integrator

    The integrator operates on a list of SPHCalc objects which define
    the interaction between a single destination particle array and a
    list of source particle arrays.

    An instance of SPHCalc is called a `calc` and thus, the integrator
    operates on a list of calcs. The calcs serve as the functions to
    be evaluated for the integrator.

    A calc can be integrating or non integrating depending on the
    operation it represents. For example, the summation density and
    density rate operations result in calcs that are non integrating
    and integrating respectively. Note that both of them operate on
    the same LHS variable, namely the density.

    Example:
    =========

    Consider a dam break simulation with two particle arrays, fluid
    and boundary. The operations are

    (a) Tait equation (updates=['p','cs'])
    (b) Density Rate (updates=['rho'])
    (c) Momentum equation with avisc (updates = ['u','v'])
    (d) Gravity force (updates = ['u','v'])
    (e) Position Stepping (updates=['x','y'])
    (f) XSPH Correction (updates=['x','y'])

    Integration of this system relies on the use of two dictionaries:

    (1) initial_properties

    The initial_properties for the integrator would look like:

    {
     'fluid': {'x':'_x0', 'y':'_y0', 'u':'_u0', 'v':'_v0', ...},
     'boundary':{'rho':'_rho0'}
    }

    that is, the initial_properties serves as a mapping between names
    of particle properties that need to be stepped and their initial
    values, per particle array. This is needed for multi-step
    integrators since we need the final step is with respect to the
    initial properties. The initial_properties is used to save out the
    properties once at the start of the integration step.

    (2) step_props

    The step_props dictionary looks like:

    {
     'fluid':{1:{ 'x':['_x0', '_a_x_1'], 'rho':['_rho0', '_a_rho_1'] ... }
     'boundary':{1: {'rho':['_rho0', '_a_rho_1']} }
    }

    that is, for each stage of the integration (k1, k2..) a dictionary
    is stored. This dictionary is keyed on the property to be stepped
    and has as value, a list of two strings. The first string is the
    name of the initial array for this property to be stepped and the
    second is the name of the variable in which the acceleration for
    this property is stored.

    The initial_properties and step_props dicts are constructed at
    setup time while examining the calcs for their update
    properties. A single acceleration variable is used for each
    property that needs to be stepped.

    The naming convention for the acceleration variable is
    '_a_<prop_name>_<stage>', thus, the acceleration variable for
    velocity at the 2nd stage of an integrator would be '_a_u_2'

    Using these two dictionaries, a typical step for the integrator is
    the following:

    (a) Save Initial Arrays:
    ------------------------
    This is easily done using the initial_properties dict. A call is
    made to the particle array to copy over the values as represented
    by the mapping.

    (b) Reset Accelerations.
    ------------------------
    Since one acceleration variable is used per property to be
    stepped, the accelerations must be set to zero before the eval
    phase of the integrator. This is because the accelerations will be
    appended at each call to the underlying SPHFunction.

    (c) Evaluate the RHS.
    -----------------------
    Each calc calls it's eval method with appropriate arguments to
    store the results of the evaluation.

    For integrating calcs, the argument is the acceleration variable
    for that property and that stage of the integration. This is where
    the step_props dict comes in.

    For non integrating calcs, the argument are the update properties
    for that calc.

    (d) Step
    ---------
    Once the calcs have been evaluated in order, the accelerations are
    stored in the appropriate variables for each particle array.
    Using the step_props dict, we can step the properties for that
    stage.

    (e) Update particles
    ---------------------
    Typically, the positions of the particles will be updated in (d)
    and this means that the indexing scheme is outdated. This
    necessitates an update to recompute the neighbor information.
    """
    def __init__(self, particles=None, calcs=[], pcalcs = []):
        # NOTE(review): the mutable default arguments are shared across
        # calls; callers in this codebase always pass calcs explicitly.
        self.particles = particles

        # the calcs used for the RHS evaluations
        self.calcs = calcs

        # the number of steps for the integrator. Typically equal to
        # the number of k arrays required.
        self.nsteps = 1

        # counter for the current stage of the integrator.
        self.cstep = 1

        # global and local time
        self.time = 0.0
        self.local_time = 0.0

        # list of particle properties to be updated across processors.
        self.rupdate_list = []

        # mapping between names of step properties and accelerations
        # per stage, per particle array
        self.step_props = {}

        # mapping between step prop name and it's initial prop name
        # per particle array
        self.initial_properties = {}

        # store the velocity accelerations per array per stage
        self.velocity_accelerations = {}

    def set_rupdate_list(self):
        """ Generate the remote update list.

        The format of this list is tied to ParallelCellManager.
        """
        # one (initially empty) slot per particle array
        for i in range(len(self.particles.arrays)):
            self.rupdate_list.append([])

    def setup_integrator( self ):
        """ Setup the integrator.

        This function sets up the initial_properties and step_props
        dicts which are used extensively for the integration.

        A non-integrating calc is used to update the property of some
        variable as a function of other variables ( eg p = f(rho) ).

        An integrating calc computes the accelerations for some LHS
        property.

        During the eval phase, a calc must pass in a string defining
        the output arrays to append the RHS result to. For a
        non-integrating calc this is simply the calc's update
        property. For an integrating calc, the arguments must be the
        accelerations for that property.
        """
        # save the arrays for easy reference
        self.arrays = self.particles.arrays

        # initialize the step_props and initial_properties.
        for array in self.arrays:
            self.step_props[array.name] = {}
            self.initial_properties[array.name] = {}

            # Initialize the velocity accelerations dict per array
            self.velocity_accelerations[array.name] = {}

            # step props needs a dict per stage of the integration as well
            for k in range(self.nsteps):
                k_num = k + 1
                self.step_props[array.name][k_num] = {}
                self.velocity_accelerations[array.name][k_num] = {}

        for calc in self.calcs:
            # get the destination particle array for the calc
            dest = calc.dest
            updates = calc.updates
            nupdates = len(updates)

            # the initial properties and accelerations need to be
            # defined in the case of integrating calcs
            if calc.integrates:
                for j in range(nupdates):
                    update_prop = updates[j]

                    # define and add the property to the destination array
                    initial_prop = '_' + update_prop + '0'
                    dest.add_property( {"name":initial_prop} )

                    # save the initial property
                    self.initial_properties[dest.name][update_prop]=initial_prop

                    # an acceleration needs to be defined for every stage.
                    for k in range(self.nsteps):
                        k_num = k + 1

                        # define and add the acceleration variable
                        step_prop = '_a_' + update_prop + '_' + str(k_num)
                        dest.add_property( {"name":step_prop} )

                        # save the acceleration variable
                        self.step_props[dest.name][k_num][update_prop] = \
                            [initial_prop, step_prop]

                        # tell the calc to use this acceleration
                        # variable as the argument for the eval phase
                        dst_writes = calc.dst_writes.get(k_num)
                        if not dst_writes:
                            calc.dst_writes[k_num] = []

                        calc.dst_writes[k_num].append( step_prop )

        self.set_rupdate_list()

    def reset_accelerations(self, step):
        """ Reset the accelerations.

        Parameters:
        -----------

        step : int
            The stage of the integrator for which to reset the accelerations.

        """
        # zero the acceleration array of every stepped property so that
        # the SPH functions can accumulate into them afresh.
        for array in self.arrays:
            zeros = numpy.zeros( array.get_number_of_particles() )
            for step_prop in self.step_props[ array.name ][step]:
                acc_prop = self.step_props[array.name][step][step_prop][1]
                array.set(**{acc_prop:zeros} )

    def save_initial_arrays(self):
        """ Save the initial arrays. """
        # copy each stepped property into its '_<prop>0' counterpart
        for array in self.arrays:
            array.copy_over_properties( self.initial_properties[array.name] )

    def eval(self):
        """ Evaluate the LHS as defined by the calcs.

        For evaluations that are time dependent, we rely on the
        integrator's local time variable to determine what time we're
        at.

        As an example, an RK2 integrator would perform two evaluations:

        K1 is evaluated at self.local_time = self.time
        K2 is evaluated at self.local_time = self.time + dt/2

        It is the responsibility of the integrator's `integrate`
        method to update the local time variable used by `eval`
        """
        calcs = self.calcs
        ncalcs = len(calcs)
        particles = self.particles

        k_num = self.cstep
        for i in range(ncalcs):
            calc = calcs[i]

            # set the time for the destination particle array
            calc.dest.set_time(self.local_time)

            # Evaluate the calc
            if calc.integrates:
                if calc.tensor_eval:
                    calc.tensor_sph( *calc.dst_writes[k_num] )
                else:
                    calc.sph( *calc.dst_writes[k_num] )
            else:
                # non-integrating calc: writes directly to the update
                # properties, which then need syncing across processors
                calc.sph( *calc.updates )

                # ensure all processes have reached this point
                particles.barrier()

                # update the properties for remote particles
                self.rupdate_list[calc.dnum] = [calc.updates]
                particles.update_remote_particle_properties(
                    self.rupdate_list)

        # ensure that all processors have evaluated the RHS's
        # not likely that this is necessary.
        particles.barrier()

    def step(self, dt):
        """ Step the particle properties. """

        # get the current stage of the integration
        k_num = self.cstep

        for array in self.arrays:
            # get the mapping for this array and this stage
            to_step = self.step_props[ array.name ][k_num]
            for prop in to_step:
                initial_prop = to_step[ prop ][0]
                step_prop = to_step[ prop ][1]

                initial_arr = array.get( initial_prop )
                step_arr = array.get( step_prop )

                # explicit update: prop = prop0 + dt * acceleration
                updated_array = initial_arr + step_arr * dt

                array.set( **{prop:updated_array} )

                # store the acceleration arrays (used by
                # get_max_acceleration for CFL-based time stepping)
                if prop in ['u','v','w']:
                    self.velocity_accelerations[array.name][k_num][step_prop] = step_arr

        # Increment the step by 1
        self.cstep += 1

    def get_max_acceleration(self, array, solver):
        """Return the maximum magnitude of the stage-1 velocity
        acceleration for *array* (used for CFL time stepping).

        On the very first iteration no accelerations exist yet, so the
        solver's current dt is returned instead.
        """
        if solver.count == 1:
            return solver.dt

        if not ( array in self.arrays ):
            raise RuntimeError("Array %s does not belong to me "%array.name)

        acc = -numpy.inf

        # NOTE(review): has_key is Python-2-only dict API
        if array.properties.has_key("_a_u_1"):
            dim = solver.dim

            if dim == 1:
                ax = self.step_props[array.name][1]['u'][1]
                k1_x = self.velocity_accelerations[array.name][1][ax]

                acc = max( acc, numpy.max(numpy.abs(k1_x)) )

            elif dim == 2:
                ax = self.step_props[array.name][1]['u'][1]
                k1_x = self.velocity_accelerations[array.name][1][ax]

                ay = self.step_props[array.name][1]['v'][1]
                k1_y = self.velocity_accelerations[array.name][1][ay]

                acc = max( acc, numpy.max(numpy.sqrt(k1_x*k1_x +\
                                                     k1_y*k1_y)) )

            elif dim == 3:
                ax = self.step_props[array.name][1]['u'][1]
                k1_x = self.velocity_accelerations[array.name][1][ax]

                ay = self.step_props[array.name][1]['v'][1]
                k1_y = self.velocity_accelerations[array.name][1][ay]

                az = self.step_props[array.name][1]['w'][1]
                k1_z = self.velocity_accelerations[array.name][1][az]

                acc = max( acc,
                           numpy.max(numpy.sqrt(k1_x*k1_x + \
                                                k1_y*k1_y + \
                                                k1_z*k1_z)) )

        return acc

    def integrate(self, dt):
        # subclasses implement the actual stepping scheme
        raise NotImplementedError
##############################################################################
#`EulerIntegrator` class
##############################################################################
class EulerIntegrator(Integrator):
    """ Euler integration of the system X' = F(X) with the formula:

    X(t + h) = X + h*F(X)
    """
    def __init__(self, particles, calcs):
        Integrator.__init__(self, particles, calcs)
        # one RHS evaluation (k1) per time step
        self.nsteps = 1

    def integrate(self, dt):
        """Advance the system by one forward-Euler step of size *dt*."""
        # set the initial properties
        self.save_initial_arrays()          # X0 = X(t)

        # Euler step
        self.reset_accelerations(step=1)

        # set the local time to the integrator's time
        self.local_time = self.time

        self.eval()                         # F(X) = k1
        self.step( dt )                     # X(t + h) = X0 + h*k1

        self.particles.update()
        # rewind the stage counter for the next time step
        self.cstep = 1
##############################################################################
#`RK2Integrator` class
##############################################################################
class RK2Integrator(Integrator):
    """ RK2 Integration for the system X' = F(X) with the formula:

    # Stage 1
    K1 = F(X)
    X(t + h/2) = X0 + h/2*K1

    # Stage 2
    K1 = F( X(t+h/2) )
    X(t + h) = X0 + h * K1
    """
    def __init__(self, particles, calcs):
        Integrator.__init__(self, particles, calcs)
        # only one acceleration slot is needed: the K1 storage is
        # reused for the second stage (cstep is reset between stages)
        self.nsteps = 1

    def integrate(self, dt):
        """Advance the system by one midpoint (RK2) step of size *dt*."""
        # set the initial arrays
        self.save_initial_arrays()          # X0 = X(t)

        #############################################################
        # Stage 1
        #############################################################
        self.reset_accelerations(step=1)

        # set the local time to the integrator's time
        self.local_time = self.time

        self.eval()                         # K1 = F(X)
        self.step(dt/2)                     # X(t+h/2) = X0 + h/2*K1

        self.particles.update()
        self.cstep = 1

        #############################################################
        # Stage 2
        #############################################################
        self.reset_accelerations(step=1)

        # update the local time
        self.local_time = self.time + dt/2

        self.eval()                         # K1 = F( X(t+h/2) )
        self.step(dt)                       # X(t+h) = X0 + h*K1

        self.particles.update()
        self.cstep = 1
##############################################################################
#`RK4Integrator` class
##############################################################################
class RK4Integrator(Integrator):
    """ RK4 Integration of a system X' = F(X) using the scheme

    # Stage 1
    K1 = F(X)
    X(t + h/2) = X0 + h/2*K1

    # Stage 2
    K2 = F( X(t + h/2) )
    X(t + h/2) = X0 + h/2*K2

    # Stage 3
    K3 = F( X(t + h/2) )
    X(t + h) = X0 + h*K3

    # Stage 4
    K4 = F( X(t + h) )
    X(t + h) = X0 + h/6 * ( K1 + 2*K2 + 2*K3 + K4 )
    """
    def __init__(self, particles, calcs):
        Integrator.__init__(self, particles, calcs)
        # four RHS evaluations (K1..K4), each with its own storage
        self.nsteps = 4

    def final_step(self, dt):
        """ Perform the final step for RK4 integration """
        fac = 1.0/6.0
        for array in self.arrays:
            # acceleration mappings for each of the four stages
            to_step_k1 = self.step_props[array.name][1]
            to_step_k2 = self.step_props[array.name][2]
            to_step_k3 = self.step_props[array.name][3]
            to_step_k4 = self.step_props[array.name][4]

            for prop in to_step_k1:
                initial_array = array.get( to_step_k1[prop][0] )

                k1_array = array.get( to_step_k1[prop][1] )
                k2_array = array.get( to_step_k2[prop][1] )
                k3_array = array.get( to_step_k3[prop][1] )
                k4_array = array.get( to_step_k4[prop][1] )

                # weighted RK4 combination of the four slopes
                updated_array = initial_array + fac*dt*(k1_array + \
                                                        2*k2_array + \
                                                        2*k3_array + \
                                                        k4_array)

                array.set( **{prop:updated_array} )

    def integrate(self, dt):
        """Advance the system by one classical RK4 step of size *dt*."""
        # save the initial arrays
        self.save_initial_arrays()          # X0 = X(t)

        #############################################################
        # Stage 1
        #############################################################
        self.reset_accelerations(step=1)

        # set the local time to the integrator's time
        self.local_time = self.time

        self.eval()                         # K1 = F(X)
        self.step(dt/2)                     # X(t + h/2) = X0 + h/2*K1

        self.particles.update()

        #############################################################
        # Stage 2
        #############################################################
        self.reset_accelerations(step=2)

        # update the local time
        self.local_time = self.time + dt/2

        self.eval()                         # K2 = F( X(t+h/2) )
        self.step(dt/2)                     # X(t+h/2) = X0 + h/2*K2

        self.particles.update()

        #############################################################
        # Stage 3
        #############################################################
        self.reset_accelerations(step=3)

        # update the local time
        self.local_time = self.time + dt/2

        self.eval()                         # K3 = F( X(t+h/2) )
        self.step(dt)                       # X(t+h) = X0 + h*K3

        self.particles.update()

        #############################################################
        # Stage 4
        #############################################################
        self.reset_accelerations(step=4)

        # update the local_time
        self.local_time = self.time + dt

        self.eval()                         # K4 = F( X(t+h) )
        self.final_step(dt)                 # X(t + h) = X0 + h/6(K1 + 2K2 + 2K3 + K4)

        self.particles.update()

        # reset the step counter
        self.cstep = 1
##############################################################################
#`PredictorCorrectorIntegrator` class
##############################################################################
class PredictorCorrectorIntegrator(Integrator):
    """ Predictor Corrector Integration of a system X' = F(X) using the scheme

    Predict:

    X(t + h/2) = X0 + h/2 * F(X)

    Correct:

    X(t + h/2) = X0 + h/2 * F( X(t + h/2) )

    Step:

    X(t + h) = 2*X(t + h/2) - X0

    """
    def __init__(self, particles, calcs):
        Integrator.__init__(self, particles, calcs)
        # predictor and corrector reuse the same acceleration storage
        self.nsteps = 1

    def final_step(self):
        """ Perform the final step in the PC integration method """
        # X(t+h) = 2*X(t+h/2) - X0, applied per stepped property
        for array in self.arrays:
            to_step = self.step_props[array.name][1]
            for prop in to_step:
                current_array = array.get( prop )
                initial_array = array.get( to_step[prop][0] )

                updated_array = 2*current_array - initial_array
                array.set( **{prop:updated_array} )

    def integrate(self, dt):
        """Advance the system by one predictor-corrector step of size *dt*."""
        # save the initial arrays
        self.save_initial_arrays()          # X0 = X(t)

        ############################################################
        # Predict
        ############################################################
        self.reset_accelerations(step=1)

        # set the local time to the integrator's time
        self.local_time = self.time

        self.eval()                         # K1 = F(X)
        self.step(dt/2)                     # X(t+h/2) = X0 + h/2*K1

        self.particles.update()
        self.cstep = 1

        ##############################################################
        # Correct
        ##############################################################
        self.reset_accelerations(step=1)

        # update the local time
        self.local_time = self.time + dt/2

        self.eval()                         # K1 = F( X(t+h/2) )
        self.step(dt/2)                     # X(t+h/2) = X0 + h/2*K1

        self.particles.update()

        ##############################################################
        # Step
        ##############################################################
        self.final_step()                   # X(t+h) = 2*X(t+h/2) - X0

        self.particles.update()
        self.cstep = 1
##############################################################################
#`LeapFrogIntegrator` class
##############################################################################
class LeapFrogIntegrator(Integrator):
    """ Leap frog integration of a system :

    \frac{Dv}{Dt} = F
    \frac{Dr}{Dt} = v
    \frac{D\rho}{Dt} = D

    the prediction step:

    vbar = v_0 + h * F_0
    r = r_0 + h*v_0 + 0.5 * h * h * F_0
    rhobar = rho_0 + h * D_0

    correction step:

    v = vbar + 0.5*h*(F - F_0)
    rho = rhobar + 0.5*h*(D - D_0)
    """
    def __init__(self, particles, calcs):
        Integrator.__init__(self, particles, calcs)
        # two evaluation stages: k1 (predict) and k2 (correct)
        self.nsteps = 2
    def add_correction_for_position(self, dt):
        """ Add the 0.5*h*h*F_0 term to the predicted positions.

        The acceleration (k1) arrays stored by the velocity-tagged calc
        are used to correct the update properties of the position calc.
        """
        ncalcs = len(self.icalcs)
        # the (single) position calc and its destination array
        pos_calc = self.pcalcs[0]
        pos_calc_pa = self.arrays[pos_calc.dnum]
        pos_calc_updates = pos_calc.updates
        for calc in self.icalcs:
            if calc.tag == "velocity":
                pa = calc.dest
                updates = calc.updates
                for j in range(calc.nupdates):
                    # j-th position property matched to the j-th velocity
                    # acceleration component
                    update_prop = pos_calc_updates[j]
                    #k1_prop = self.k1_props['k1'][calc.id][j]
                    k1_prop = self.k_props[calc.id]['k1'][j]
                    # the current position
                    current_arr = pa.get(update_prop)
                    # acceleration from the first (predictor) evaluation
                    step_array = pa.get(k1_prop)
                    updated_array = current_arr + 0.5*dt*dt*step_array
                    pos_calc_pa.set(**{update_prop:updated_array})
    def final_step(self, calc, dt):
        """ Correction step: x = xbar + 0.5*h*(k2 - k1) for each update
        property of the given calc. """
        #pa = self.arrays[calc.dnum]
        pa = calc.dest
        updates = calc.updates
        for j in range(len(updates)):
            update_prop = updates[j]
            # accelerations from the predictor (k1) and corrector (k2)
            k1_prop = self.k_props[calc.id]['k1'][j]
            k2_prop = self.k_props[calc.id]['k2'][j]
            k1_array = pa.get(k1_prop)
            k2_array = pa.get(k2_prop)
            current_array = pa.get(update_prop)
            updated_array = current_array + 0.5*dt*(k2_array - k1_array)
            pa.set(**{update_prop:updated_array})
    def integrate(self, dt):
        """ Advance the system by dt using the leap frog scheme """
        # set the initial arrays
        self.set_initial_arrays()
        # eval and step the non position calcs at the current state
        self.do_step(self.ncalcs, dt)
        self.cstep = 1
        # eval and step the position calcs
        self.do_step(self.pcalcs, dt)
        # add correction for the positions
        self.add_correction_for_position(dt)
        #for calc in self.hcalcs:
        #    calc.sph('h')
        # ensure all processors have reached this point, then update
        self.particles.barrier()
        self.particles.update()
        # eval and step the non position calcs
        self.eval(self.ncalcs)
        for calc in self.icalcs:
            self.final_step(calc, dt)
        self.cstep = 1
##############################################################################
#`GSPHIntegrator` class
##############################################################################
class GSPHIntegrator(EulerIntegrator):
    """ Euler integration of the system X' = F(X) with the formula:

    X(t + h) = X + h*F(X)

    In addition to the plain Euler step, the velocity accelerations are
    cached and a half-stepped 'star' velocity (u0 + 0.5*h*F) is stored
    for the velocity components.
    """
    def step(self, dt):
        """ Step the particle properties.

        Parameters:
        -----------
        dt : float
            the time step
        """
        # get the current stage of the integration
        k_num = self.cstep
        for array in self.arrays:
            # get the mapping for this array and this stage
            to_step = self.step_props[ array.name ][k_num]
            for prop in to_step:
                initial_prop = to_step[ prop ][0]
                step_prop = to_step[ prop ][1]
                initial_arr = array.get( initial_prop )
                step_arr = array.get( step_prop )
                # Euler step: X(t+h) = X0 + h*F(X)
                updated_array = initial_arr + step_arr * dt
                array.set( **{prop:updated_array} )
                # store the acceleration arrays and the half-stepped
                # 'star' velocity for the velocity components
                if prop in ['u','v','w']:
                    self.velocity_accelerations[array.name][k_num][step_prop] = step_arr
                    vstar = prop + "star"
                    # the previous value is simply overwritten, so the
                    # dead read of the old 'star' array was removed
                    star = initial_arr + 0.5 * step_arr*dt
                    array.set( **{vstar:star})
        # Increment the step by 1
        self.cstep += 1
###########################################################################
# Mapping of integration scheme names to integrator classes.  Used by the
# application's ``--integration`` command line option to select the
# integrator by index.
integration_methods = [('Euler', EulerIntegrator),
                       ('LeapFrog', LeapFrogIntegrator),
                       ('RK2', RK2Integrator),
                       ('RK4', RK4Integrator),
                       ('PredictorCorrector', PredictorCorrectorIntegrator),
                       ]
| Python |
import numpy
class TimeStep(object):
    """Trivial time step function that leaves the solver's dt unchanged."""
    def compute_time_step(self, solver):
        """Return the solver's current time step as-is."""
        current_dt = solver.dt
        return current_dt
class ViscousTimeStep(TimeStep):
    """Viscous (CFL) based time step:

        dt = CFL * min( h / (co + max(dt_fac)) )

    where 'dt_fac' is a per-particle viscous factor stored on the array
    and 'co' is the speed of sound.
    """
    def __init__(self, cfl, co, particles):
        """Constructor.

        Parameters:
        -----------
        cfl : float
            the CFL number
        co : float
            speed of sound
        particles : Particles
            particles over which the criterion is evaluated
        """
        self.cfl = cfl
        self.co = co
        self.particles = particles
    def compute_time_step(self, solver):
        """Return the smallest viscous time step over all arrays."""
        cfl = self.cfl
        co = self.co
        # take dt to be some large value
        dt = 1
        arrays = self.particles.arrays
        for array in arrays:
            # dict.has_key is deprecated (removed in Python 3); the 'in'
            # operator is equivalent and works on both versions
            if 'dt_fac' in array.properties:
                h, dt_fac = array.get('h','dt_fac')
                _dt = numpy.min( cfl * h/(co + numpy.max(dt_fac)) )
                # choose the minimum time step from all arrays
                dt = min( _dt, dt )
        return dt
class ViscousAndForceBasedTimeStep(ViscousTimeStep):
    """Time step based on both the viscous and the force criteria:

        dt = min( dt_viscous, CFL * min(sqrt(h/fmax)) )

    where fmax is the maximum particle acceleration as reported by the
    solver's integrator.
    """
    def compute_time_step(self, solver):
        """Return the smaller of the viscous and acceleration based steps."""
        # compute the time step based on the viscous criterion
        dt = ViscousTimeStep.compute_time_step(self, solver)
        # compute the acceleration based time step
        integrator = solver.integrator
        arrays = self.particles.arrays
        for array in arrays:
            # dict.has_key is deprecated (removed in Python 3); the 'in'
            # operator is equivalent and works on both versions
            if "_a_u_1" in array.properties:
                fmax = integrator.get_max_acceleration(array, solver)
                h = array.get("h")
                _dt = self.cfl * numpy.min( numpy.sqrt(h/fmax) )
                dt = min( dt, _dt )
        return dt
class VelocityBasedTimeStep(object):
    """Signal velocity based CFL time step:

        dt = CFL * min( h / (cs + |v|) )
    """
    def __init__(self, particles, cfl=0.3,):
        self.cfl = cfl
        self.particles = particles
    def compute_time_step(self, solver):
        """Return the smallest signal based time step over all arrays."""
        smallest = float('inf')
        for pa in solver.particles.arrays:
            # particle speed magnitude
            vmag = (pa.u**2 + pa.v**2 + pa.w**2)**0.5
            candidate = min(pa.h/(pa.cs + vmag))
            smallest = min(smallest, candidate)
        return self.cfl*smallest
| Python |
""" SImple plotting for the data """
from utils import get_pickled_data
import numpy
import pysph.base.api as base
#import visvis as vv
class ParticleInformation(object):
    """Load and query saved particle, neighbor and cell data.

    The data is read from the pickled/npz files written out by the
    solver's post-step functions, one set of files per processor.
    """
    def __init__(self, fname, nprocs, array_name, time,
                 load_neighbors=True, load_cells=True):
        """Constructor.

        Parameters:
        -----------
        fname : str
            base name of the particle data (npz) files
        nprocs : int
            number of processors that produced the data
        array_name : str
            name of the particle array to load
        time : output time / iteration used as a file-name suffix
        load_neighbors : bool
            load the per-particle neighbor files
        load_cells : bool
            load the cell manager data files
        """
        self.fname = fname
        self.nprocs = nprocs
        self.array_name = array_name
        self.time = time
        self.load_neighbors = load_neighbors
        self.load_cells = load_cells
        # particle index -> neighbor index array (LongArray)
        self.neighbors = {}
        # particle index -> cell id
        self.cell_ids = {}
        # cell id -> cell data
        self.cell_data = {}
        # processor rank -> raw npz data
        self.particle_data = {}
        # particle index -> {'x': ..., 'y': ..., 'z': ...}
        self.particle_positions = {}
        self.particle_idx_by_proc = {}
        self.particle_positions_by_proc = {}
        # total number of particles; set by _load_particle_data
        self.np = -1
        # load the data
        if load_neighbors:
            self._load_neighbor_data()
        if load_cells:
            self._load_cell_data()
        self._load_particle_data(fname)
    def _load_neighbor_data(self):
        """Load the pickled neighbor/cell-id data for every processor."""
        for i in range(self.nprocs):
            fname = "neighbors_" + str(i) + "_" + self.array_name
            fname += "_" + str(self.time)
            nbr_data = get_pickled_data(fname)
            # items() works on Python 2 and 3 (iteritems is 2.x only)
            for idx, data in nbr_data.items():
                self.neighbors[idx] = data["neighbors"]
                self.cell_ids[idx] = data["cid"]
    def _load_cell_data(self):
        """Load the pickled cell manager data for every processor."""
        for i in range(self.nprocs):
            fname = "cells_" + str(i) + "_" + str(self.time)
            cell_data = get_pickled_data(fname)
            self.cell_data.update(cell_data)
    def _load_particle_data(self, fname):
        """Load the npz particle data files and index positions by
        particle index and by processor."""
        np = 0
        for i in range(self.nprocs):
            self.particle_positions_by_proc[i] = {}
            _fname = fname + "_" + str(i) + "_" + self.array_name + "_"
            _fname += str(self.time) + ".npz"
            data = numpy.load(_fname)
            self.particle_data[i] = data
            self.particle_positions_by_proc[i]['x'] = data['x']
            # 'y' and 'z' are only present for 2D/3D data
            if 'y' in data.files:
                self.particle_positions_by_proc[i]['y'] = data['y']
            if 'z' in data.files:
                self.particle_positions_by_proc[i]['z'] = data['z']
            idx = data['idx']
            self.particle_idx_by_proc[i] = idx
            for j in range(data['np']):
                particle_idx = idx[j]
                self.particle_positions[particle_idx] = {}
                self.particle_positions[particle_idx]['x'] = data['x'][j]
                if 'y' in data.files:
                    self.particle_positions[particle_idx]['y'] = data['y'][j]
                if 'z' in data.files:
                    self.particle_positions[particle_idx]['z'] = data['z'][j]
            np += data['np']
        self.np = np
    def get_particle_position(self, idx):
        """Return the position of particle 'idx' as a Point.

        Raises RuntimeError for an out-of-range index.
        """
        if idx < 0 or idx > self.np:
            # 'RuntimeError' was previously misspelled 'RunTimeError',
            # which raised a NameError instead of the intended error
            raise RuntimeError("Invalid Particle Index!")
        coords = self.particle_positions[idx]
        # pass the coordinates in a fixed (x, y, z) order; dict.values()
        # has no guaranteed ordering on older Pythons
        args = [coords[c] for c in ('x', 'y', 'z') if c in coords]
        return base.Point(*args)
    def get_cid_for_particle(self, idx):
        """Return the cell id for a particle (None if cells not loaded)."""
        if not self.load_cells:
            return
        self.check_particle_id(idx)
        return self.cell_ids[idx]
    def get_particles_in_cell(self, cid):
        """ Return the particles in the same cell """
        if not self.load_cells:
            return
        cell_data = self.cell_data[cid]
        particle_indices = cell_data["positions"]["idx"]
        return particle_indices
    def get_particle_info(self, idx):
        """Return a dict with the cell id, position, neighbors and the
        owning processor rank of the particle."""
        if not self.load_neighbors:
            return
        self.check_particle_id(idx)
        cid = self.get_cid_for_particle(idx)
        neighbors = self.neighbors[idx].get_npy_array()
        position = self.get_particle_position(idx)
        info = {'cid':cid, "position":position, 'neighbors':neighbors}
        # find which processor owns this particle
        for rank, idx_list in self.particle_idx_by_proc.items():
            if idx in idx_list:
                info["rank"] = rank
                break
        return info
    def get_cells_for_proc(self, proc):
        """Return the set of cell ids owned by the given processor."""
        if proc < 0 or proc > self.nprocs:
            # see note in get_particle_position about the misspelling
            raise RuntimeError("Invalid pid!")
        particle_ids = self.particle_idx_by_proc[proc]
        return set([self.cell_ids[i] for i in particle_ids])
    def get_coordinates_for_cell(self, cid):
        """Return the corner coordinates of a (square) cell as a record
        array; the first corner is repeated so the loop is closed for
        plotting."""
        centroid = self.cell_data[cid]['centroid']
        cell_size = self.particle_data[0]['cell_size']
        x1 = centroid[0] - 0.5 * cell_size
        y1 = centroid[1] - 0.5 * cell_size
        x2 = x1 + cell_size
        y2 = y1
        x3 = x2
        y3 = y2 + cell_size
        x4 = x1
        y4 = y3
        x = numpy.array([x1,x2,x3,x4,x1])
        y = numpy.array([y1,y2,y3,y4,y1])
        return numpy.rec.fromarrays([x,y], names="x,y")
    def check_particle_id(self, idx):
        """Raise RuntimeError if 'idx' is outside the valid range.

        NOTE(review): an index equal to self.np passes this check
        although the valid range is probably 0 <= idx < np -- confirm.
        """
        if idx < 0 or idx > self.np:
            raise RuntimeError("Invalid Particle Index!")
| Python |
from integrator import Integrator
from cl_utils import HAS_CL, get_pysph_root, get_cl_include,\
get_scalar_buffer, cl_read, get_real, enqueue_copy
if HAS_CL:
import pyopencl as cl
from os import path
import numpy
class CLIntegrator(Integrator):
    """OpenCL version of the Integrator.

    Evaluation and stepping of the particle properties is performed on
    the OpenCL device through the kernels defined in
    'solver/integrator.cl' ('set_to_zero', 'step_array', ...).
    """
    def setup_integrator(self, context):
        """ Setup the additional particle arrays for integration.

        Parameters:
        -----------
        context -- the OpenCL context

        setup_cl on the calcs must be called when all particle
        properties on the particle array are created. This is
        important as all device buffers will created.
        """
        Integrator.setup_integrator(self)
        self.setup_cl(context)
        self.cl_precision = self.particles.get_cl_precision()
    def setup_cl(self, context):
        """ OpenCL setup: set up the calcs and build the integrator
        program for this context. """
        self.context = context
        for calc in self.calcs:
            calc.setup_cl(context)
        # setup the OpenCL Program
        root = get_pysph_root()
        src = cl_read(path.join(root, 'solver/integrator.cl'),
                      self.particles.get_cl_precision())
        self.program = cl.Program(context, src).build(get_cl_include())
    def reset_accelerations(self, step):
        """ Zero the device acceleration buffers for the given step """
        for array in self.arrays:
            queue = array.queue
            np = array.get_number_of_particles()
            to_step = self.step_props[array.name][step]
            for prop in to_step:
                # to_step maps prop -> (initial_prop, acceleration_prop)
                acc_prop = to_step[prop][1]
                acc_buffer = array.get_cl_buffer( acc_prop )
                self.program.set_to_zero(queue, (np,), (1,), acc_buffer).wait()
    def save_initial_arrays(self):
        """ Set the initial arrays for each calc

        The initial array is the update property of a calc appended with _0

        Note that multiple calcs can update the same property and this
        will not replicate the creation of the initial arrays.

        In OpenCL, we call the EnqueueCopyBuffer with source as the
        current update property and destination as the initial
        property array.
        """
        for array in self.arrays:
            queue = array.queue
            initial_props = self.initial_properties[ array.name ]
            for prop in initial_props:
                src = array.get_cl_buffer( prop )
                dst = array.get_cl_buffer( initial_props[prop] )
                enqueue_copy(queue=queue, src=src, dst=dst)
    def eval(self):
        """ Evaluate each calc; integrating calcs write their results
        to the acceleration buffers for the current stage. """
        particles = self.particles
        k_num = self.cstep
        for calc in self.calcs:
            if calc.integrates:
                calc.sph( *calc.dst_writes[k_num] )
            else:
                calc.sph( *calc.updates )
        # ensure that the eval phase is completed for all processes
        particles.barrier()
    def step(self, dt):
        """ Perform stepping for the integrating calcs:
        x = x0 + h * dx/dt on the device. """
        cl_dt = get_real(dt, self.cl_precision)
        # get the current stage of the integration
        k_num = self.cstep
        for array in self.arrays:
            # get the number of particles
            np = array.get_number_of_particles()
            # get the command queue for the array
            queue = array.queue
            # get the mapping for this array and this stage
            to_step = self.step_props[ array.name ][k_num]
            for prop in to_step:
                initial_prop = to_step[ prop ][0]
                step_prop = to_step[ prop ][1]
                prop_buffer = array.get_cl_buffer( prop )
                step_buffer = array.get_cl_buffer( step_prop )
                initial_buffer = array.get_cl_buffer( initial_prop )
                self.program.step_array(queue, (np,1,1), (1,1,1),
                                        initial_buffer, step_buffer,
                                        prop_buffer, cl_dt)
        # Increment the step by 1
        self.cstep += 1
##############################################################################
#`CLEulerIntegrator` class
##############################################################################
class CLEulerIntegrator(CLIntegrator):
    """ Euler integration of the system X' = F(X) with the formula:

    X(t + h) = X + h*F(X)
    """
    def __init__(self, particles, calcs):
        CLIntegrator.__init__(self, particles, calcs)
        # a single stage per step
        self.nsteps = 1
    def integrate(self, dt):
        """ Advance the system by one Euler step of size dt """
        # remember X0 = X(t)
        self.save_initial_arrays()
        # zero the accelerations, evaluate F(X) and take the step
        self.reset_accelerations(step=1)
        self.eval()
        self.step(dt)
        # refresh the particle neighbor information
        self.particles.update()
        self.cstep = 1
##############################################################################
#`CLRK2Integrator` class
##############################################################################
class CLRK2Integrator(CLIntegrator):
    """ RK2 Integration for the system X' = F(X) with the formula:

    # Stage 1
    K1 = F(X)
    X(t + h/2) = X0 + h/2*K1

    # Stage 2
    K1 = F( X(t+h/2) )
    X(t + h) = X0 + h * K1
    """
    def __init__(self, particles, calcs):
        CLIntegrator.__init__(self, particles, calcs)
        # only one acceleration slot is used; K1 is overwritten in stage 2
        self.nsteps = 1
    def integrate(self, dt):
        """ Advance the system by dt with the two RK2 stages """
        # set the initial arrays
        self.save_initial_arrays()       # X0 = X(t)
        #############################################################
        # Stage 1
        #############################################################
        self.reset_accelerations(step=1)
        self.eval()                      # K1 = F(X)
        self.step(dt/2)                  # X(t+h/2) = X0 + h/2*K1
        self.particles.update()
        self.cstep = 1
        #############################################################
        # Stage 2
        #############################################################
        self.reset_accelerations(step=1)
        self.eval()                      # K1 = F( X(t+h/2) )
        self.step(dt)                    # X(t+h) = X0 + h*K1
        self.particles.update()
        self.cstep = 1
##############################################################################
#`CLPredictorCorrectorIntegrator` class
##############################################################################
class CLPredictorCorrectorIntegrator(CLIntegrator):
    """ Predictor Corrector Integration of a system X' = F(X) using the scheme

    Predict:
        X(t + h/2) = X0 + h/2 * F(X)
    Correct:
        X(t + h/2) = X0 + h/2 * F( X(t + h/2) )
    Step:
        X(t + h) = 2*X(t + h/2) - X0
    """
    def __init__(self, particles, calcs):
        CLIntegrator.__init__(self, particles, calcs)
        self.nsteps = 1
    def final_step(self, dt=None):
        """ Perform the final step X(t+h) = 2*X(t+h/2) - X0 on the device.

        'dt' is accepted for compatibility with the call in integrate
        (and with the CPU integrator) but is not used by the kernel.
        """
        for array in self.arrays:
            # the queue and particle count were previously referenced as
            # undefined names; obtain them from the particle array as in
            # the other device methods
            queue = array.queue
            np = array.get_number_of_particles()
            to_step = self.step_props[array.name][1]
            for prop in to_step:
                current_buffer = array.get_cl_buffer( prop )
                initial_buffer = array.get_cl_buffer( to_step[prop][0] )
                self.program.pc_final_step( queue, (np,), (1,),
                                            current_buffer,
                                            initial_buffer).wait()
    def integrate(self, dt):
        """ Advance the system by dt using predict-correct-step """
        # save the initial arrays
        self.save_initial_arrays()       # X0 = X(t)
        ############################################################
        # Predict
        ############################################################
        self.reset_accelerations(step=1)
        self.eval()                      # K1 = F(X)
        self.step(dt/2)                  # X(t+h/2) = X0 + h/2*K1
        self.particles.update()
        self.cstep = 1
        ##############################################################
        # Correct
        ##############################################################
        self.reset_accelerations(step=1)
        self.eval()                      # K1 = F( X(t+h/2) )
        self.step(dt/2)                  # X(t+h/2) = X0 + h/2*K1
        self.particles.update()
        ##############################################################
        # Step
        ##############################################################
        self.final_step(dt)              # X(t+h) = 2*X(t+h/2) - X0
        self.particles.update()
        self.cstep = 1
| Python |
""" Post step functions for the solver """
import pickle
import os
import pysph.base.api as base
from pysph.base.cell import py_find_cell_id
class SaveCellManagerData(object):
    """Post-step function to save the cell manager's data.

    Two files are created, 'neighbors' contains particle neighbor
    information as returned by the neighbor locator. For each
    particle, a LongArray for it's neighbor indices are stored.

    The second file 'cells', holds cell data for each cell
    (particle indices, coordinates)
    """
    def __init__(self, rank = 0, path=None, count=10):
        """Constructor.

        Parameters:
        -----------
        rank : int
            processor rank used in the output file names
        path : str or None
            directory for the output files (default: current directory)
        count : int
            save every 'count' solver iterations
        """
        self.rank = rank
        self.count = count
        if path:
            self.path = path
        else:
            self.path = "."
    def eval(self, solver):
        """Save the neighbor and cell files if due at this iteration."""
        # only save every self.count iterations
        if not ((solver.count % self.count) == 0):
            return
        particles = solver.particles
        time = solver.t
        nnps = particles.nnps_manager
        locator_cache = nnps.particle_locator_cache
        num_locs = len(locator_cache)
        locators = locator_cache.values()
        fname_base = os.path.join(self.path+"/neighbors_"+str(self.rank))
        cell_manager = particles.cell_manager
        cell_size = cell_manager.cell_size
        neighbor_idx = {}
        for i in range(num_locs):
            loc = locators[i]
            dest = loc.dest
            src = loc.source
            particle_indices = dest.get('idx')
            x, y, z = dest.get("x", "y", "z")
            # neighbor data is keyed on 'dest-source' array names
            neighbor_idx[dest.name + '-' + src.name] = {}
            d = neighbor_idx[dest.name + '-' + src.name]
            nrp = dest.num_real_particles
            for j in range(nrp):
                neighbors = loc.py_get_nearest_particles(j)
                temp = dest.extract_particles(neighbors)
                particle_idx = particle_indices[j]
                # cell id recomputed from the particle's position
                pnt = base.Point(x[j], y[j], z[j])
                cid = py_find_cell_id(pnt, cell_size)
                idx = temp.get_carray("idx")
                d[particle_idx] = {'neighbors':idx, 'cid':cid}
            fname = fname_base + "_" + dest.name + "_" + str(solver.count)
            # save particle neighbor information.
            # NOTE(review): the file is opened in text mode; binary mode
            # ('wb') would be required for pickle protocols > 0 -- confirm
            f = open(fname, 'w')
            pickle.dump(neighbor_idx, f)
            f.close()
        fname_cells = os.path.join(self.path+"/cells_"+str(self.rank))
        fname_cells += "_" + str(solver.count)
        # ask the cell manager to save the particle representation
        cell_manager.get_particle_representation(fname_cells)
class CFLTimeStepFunction(object):
    """Post-step function that sets solver.dt from the CFL condition:

        dt = CFL * min( h / (cs + |v|) )
    """
    def __init__(self, CFL=0.3):
        self.cfl = CFL
    def eval(self, solver):
        """Compute the CFL time step and store it on the solver."""
        smallest = float('inf')
        for pa in solver.particles.arrays:
            # signal speed: sound speed plus velocity magnitude
            vmag = (pa.u**2 + pa.v**2 + pa.w**2)**0.5
            candidate = min(pa.h/(pa.cs + vmag))
            if candidate < smallest:
                smallest = candidate
        solver.dt = self.cfl*smallest
| Python |
# Standard imports.
import logging, os
from optparse import OptionParser, OptionGroup, Option
from os.path import basename, splitext, abspath
import sys
from utils import mkdir
# PySPH imports.
from pysph.base.particles import Particles, CLParticles, ParticleArray
from pysph.solver.controller import CommandManager
from pysph.solver.integrator import integration_methods
from pysph.base.nnps import NeighborLocatorType as LocatorType
import pysph.base.kernels as kernels
# MPI conditional imports
HAS_MPI = True
try:
from mpi4py import MPI
except ImportError:
HAS_MPI = False
else:
from pysph.parallel.load_balancer import LoadBalancer
from pysph.parallel.simple_parallel_manager import \
SimpleParallelManager
from pysph.parallel.parallel_cell import ParallelCellManager
from pysph.parallel.simple_block_manager import SimpleBlockManager
def list_option_callback(option, opt, value, parser):
    """optparse callback that splits a comma separated value and appends
    any remaining positional arguments, storing the resulting list on
    the option's destination."""
    items = value.split(',')
    items.extend( parser.rargs )
    setattr( parser.values, option.dest, items )
##############################################################################
# `Application` class.
##############################################################################
class Application(object):
""" Class used by any SPH application """
def __init__(self, load_balance=True, fname=None):
""" Constructor
Parameters
----------
load_balance : A boolean which determines if automatic load
balancing is to be performed or not
"""
self._solver = None
self._parallel_manager = None
# The initial distribution method name to pass to the LoadBalancer's
# `distribute_particles` method, can be one of ('auto', 'sfc', 'single'
# etc.)
self._distr_func = 'auto'
self.load_balance = load_balance
if fname == None:
fname = sys.argv[0].split('.')[0]
self.fname = fname
self.args = sys.argv[1:]
# MPI related vars.
self.comm = None
self.num_procs = 1
self.rank = 0
if HAS_MPI:
self.comm = comm = MPI.COMM_WORLD
self.num_procs = comm.Get_size()
self.rank = comm.Get_rank()
self._log_levels = {'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARNING,
'error': logging.ERROR,
'critical': logging.CRITICAL,
'none': None}
self._setup_optparse()
self.path = None
    def _setup_optparse(self):
        """Create the application's OptionParser and register the default
        command line options understood by all PySPH applications."""
        usage = """
        %prog [options]

        Note that you may run this program via MPI and the run will be
        automatically parallelized. To do this run::

        $ mpirun -n 4 /path/to/your/python %prog [options]

        Replace '4' above with the number of processors you have.
        Below are the options you may pass.
        """
        parser = OptionParser(usage)
        self.opt_parse = parser
        # Add some default options.
        parser.add_option("-b", "--no-load-balance", action="store_true",
                          dest="no_load_balance", default=False,
                          help="Do not perform automatic load balancing "\
                               "for parallel runs.")
        # -v
        valid_vals = "Valid values: %s"%self._log_levels.keys()
        parser.add_option("-v", "--loglevel", action="store",
                          type="string",
                          dest="loglevel",
                          default='warning',
                          help="Log-level to use for log messages. " +
                               valid_vals)
        # --logfile
        parser.add_option("--logfile", action="store",
                          type="string",
                          dest="logfile",
                          default=None,
                          help="Log file to use for logging, set to "+
                               "empty ('') for no file logging.")
        # -l
        parser.add_option("-l", "--print-log", action="store_true",
                          dest="print_log", default=False,
                          help="Print log messages to stderr.")
        # --final-time
        parser.add_option("--final-time", action="store",
                          type="float",
                          dest="final_time",
                          default=None,
                          help="Total time for the simulation.")
        # --timestep
        parser.add_option("--timestep", action="store",
                          type="float",
                          dest="time_step",
                          default=None,
                          help="Timestep to use for the simulation.")
        # -q/--quiet.
        parser.add_option("-q", "--quiet", action="store_true",
                          dest="quiet", default=False,
                          help="Do not print any progress information.")
        # -o/ --output
        parser.add_option("-o", "--output", action="store",
                          dest="output", default=self.fname,
                          help="File name to use for output")
        # --output-freq.
        parser.add_option("--freq", action="store",
                          dest="freq", default=20, type="int",
                          help="Printing frequency for the output")
        # -d/ --detailed-output.
        parser.add_option("-d", "--detailed-output", action="store_true",
                          dest="detailed_output", default=False,
                          help="Dump detailed output.")
        # --directory
        parser.add_option("--directory", action="store",
                          dest="output_dir", default=self.fname+'_output',
                          help="Dump output in the specified directory.")
        # --kernel
        parser.add_option("--kernel", action="store",
                          dest="kernel", type="int",
                          help="%-55s"%"The kernel function to use:"+
                               ''.join(['%d - %-51s'%(d,s) for d,s in
                                        enumerate(kernels.kernel_names)]))
        # --hks
        parser.add_option("--hks", action="store_true",
                          dest="hks", default=True,
                          help="""Perform the Hrenquist and Katz kernel
                          normalization for variable smothing lengths.""")
        # -k/--kernel-correction
        parser.add_option("-k", "--kernel-correction", action="store",
                          dest="kernel_correction", type="int",
                          default=-1,
                          help="""Use Kernel correction.
                          0 - Bonnet and Lok correction
                          1 - RKPM first order correction""")
        # --integration
        parser.add_option("--integration", action="store",
                          dest="integration", type="int",
                          help="%-55s"%"The integration method to use:"+
                               ''.join(['%d - %-51s'%(d,s[0]) for d,s in
                                        enumerate(integration_methods)]))
        # --cl
        parser.add_option("--cl", action="store_true", dest="with_cl",
                          default=False, help=""" Use OpenCL to run the
                          simulation on an appropriate device """)
        # --parallel-mode
        parser.add_option("--parallel-mode", action="store",
                          dest="parallel_mode", default="simple",
                          help = """Use 'simple' (which shares all particles)
                          or 'auto' (which does block based parallel
                          distribution of particles).""")
        # --parallel-output-mode
        parser.add_option("--parallel-output-mode", action="store",
                          dest="parallel_output_mode", default="collected",
                          help="""Use 'collected' to dump one output at
                          root or 'distributed' for every processor. """)
        # solver interfaces
        interfaces = OptionGroup(parser, "Interfaces",
                                 "Add interfaces to the solver")
        interfaces.add_option("--interactive", action="store_true",
                              dest="cmd_line", default=False,
                              help=("Add an interactive commandline interface "
                                    "to the solver"))
        interfaces.add_option("--xml-rpc", action="store",
                              dest="xml_rpc", metavar='[HOST:]PORT',
                              help=("Add an XML-RPC interface to the solver; "
                                    "HOST=0.0.0.0 by default"))
        interfaces.add_option("--multiproc", action="store",
                              dest="multiproc", metavar='[[AUTHKEY@]HOST:]PORT[+]',
                              default="pysph@0.0.0.0:8800+",
                              help=("Add a python multiprocessing interface "
                                    "to the solver; "
                                    "AUTHKEY=pysph, HOST=0.0.0.0, PORT=8800+ by"
                                    " default (8800+ means first available port "
                                    "number 8800 onwards)"))
        interfaces.add_option("--no-multiproc", action="store_const",
                              dest="multiproc", const=None,
                              help=("Disable multiprocessing interface "
                                    "to the solver"))
        parser.add_option_group(interfaces)
        # solver job resume support
        parser.add_option('--resume', action='store', dest='resume',
                          metavar='COUNT|count|?',
                          help=('Resume solver from specified time (as stored '
                                'in the data in output directory); count chooses '
                                'a particular file; ? lists all '
                                'available files')
                          )
def _process_command_line(self):
""" Parse any command line arguments.
Add any new options before this is called. This also sets up
the logging automatically.
"""
(options, args) = self.opt_parse.parse_args(self.args)
self.options = options
# Setup logging based on command line options.
level = self._log_levels[options.loglevel]
#save the path where we want to dump output
self.path = abspath(options.output_dir)
mkdir(self.path)
if level is not None:
self._setup_logging(options.logfile, level,
options.print_log)
    def _setup_logging(self, filename=None, loglevel=logging.WARNING,
                       stream=True):
        """ Setup logging for the application.

        Parameters
        ----------
        filename : The filename to log messages to.  If this is None
                   a filename is automatically chosen and if it is an
                   empty string, no file is used
        loglevel : The logging level
        stream : Boolean indicating if logging is also printed on
                 stderr
        """
        # logging setup
        self.logger = logger = logging.getLogger()
        logger.setLevel(loglevel)
        # Setup the log file.
        if filename is None:
            filename = splitext(basename(sys.argv[0]))[0] + '.log'
        if len(filename) > 0:
            lfn = os.path.join(self.path,filename)
            # NOTE(review): file logging is only configured for parallel
            # runs (num_procs > 1); serial runs get no log file even
            # though 'lfn' is computed -- confirm this is intended
            if self.num_procs > 1:
                logging.basicConfig(level=loglevel, filename=lfn,
                                    filemode='w')
        if stream:
            logger.addHandler(logging.StreamHandler())
    def _create_particles(self, variable_h, callable, min_cell_size=-1,
                          *args, **kw):
        """ Create particles given a callable and any arguments to it.

        This will also automatically distribute the particles among
        processors if this is a parallel run. Returns the `Particles`
        instance that is created.

        Parameters:
        -----------
        variable_h : bool
            whether the particles use variable smoothing lengths
        callable : a function returning the particle array(s);
            NOTE(review): the parameter name shadows the builtin
            'callable'
        min_cell_size : float
            minimum cell size forwarded to the Particles constructor
        """
        num_procs = self.num_procs
        rank = self.rank
        data = None
        if rank == 0:
            # Only master creates the particles.
            pa = callable(*args, **kw)
            distr_func = self._distr_func
            if num_procs > 1:
                # Use the offline load-balancer to distribute the data
                # initially. Negative cell size forces automatic computation.
                data = LoadBalancer.distribute_particles(pa,
                                                         num_procs=num_procs,
                                                         block_size=-1,
                                                         distr_func=distr_func)
        if num_procs > 1:
            # Now scatter the distributed data.
            pa = self.comm.scatter(data, root=0)
        self.particle_array = pa
        in_parallel = num_procs > 1
        # normalize to a list of particle arrays
        if isinstance(pa, (ParticleArray,)):
            pa = [pa]
        no_load_balance = self.options.no_load_balance
        if no_load_balance:
            self.load_balance = False
        else:
            self.load_balance = True
        if self.options.with_cl:
            # OpenCL run: use the provided locator/domain manager types
            # if both are given, otherwise the CLParticles defaults
            cl_locator_type = kw.get('cl_locator_type', None)
            domain_manager_type = kw.get('domain_manager_type', None)
            if cl_locator_type and domain_manager_type:
                self.particles = CLParticles(
                    arrays=pa, cl_locator_type=cl_locator_type,
                    domain_manager_type=domain_manager_type)
            else:
                self.particles = CLParticles(arrays=pa)
        else:
            locator_type = kw.get('locator_type', None)
            if locator_type:
                # only these locator types are supported for non-CL runs
                if locator_type not in [LocatorType.NSquareNeighborLocator,
                                        LocatorType.SPHNeighborLocator]:
                    msg = "locator type %d not understood"%(locator_type)
                    raise RuntimeError(msg)
            else:
                locator_type = LocatorType.SPHNeighborLocator
            self.particles = Particles(arrays=pa, variable_h=variable_h,
                                       in_parallel=in_parallel,
                                       load_balancing=self.load_balance,
                                       update_particles=True,
                                       min_cell_size=min_cell_size,
                                       locator_type=locator_type)
        return self.particles
######################################################################
# Public interface.
######################################################################
    def set_args(self, args):
        """Set the command line argument list to parse (used in place of
        sys.argv[1:])."""
        self.args = args
def add_option(self, opt):
""" Add an Option/OptionGroup or their list to OptionParser """
if isinstance(opt, OptionGroup):
self.opt_parse.add_option_group(opt)
elif isinstance(opt, Option):
self.opt_parse.add_option(opt)
else:
# assume a list of Option/OptionGroup
for o in opt:
self.add_option(o)
def setup(self, solver, create_particles=None,
          variable_h=False, min_cell_size=-1, **kwargs):
    """Set the application's solver.  This will call the solver's
    `setup` method.

    The following solver options are set:

    dt -- the time step for the solver
    tf -- the final time for the simulation
    fname -- the file name for output file printing
    freq -- the output print frequency
    level -- the output detail level
    dir -- the output directory
    hks -- Hernquist and Katz kernel correction
    eps -- the xsph correction factor
    with_cl -- OpenCL related initializations
    integration_type -- The integration method
    default_kernel -- the default kernel to use for operations

    Parameters
    ----------
    create_particles : callable or None
        If supplied, particles will be created for the solver using the
        particle arrays returned by the callable. Else particles for the
        solver need to be set before calling this method
    variable_h : bool
        If the particles created using create_particles have variable h
    min_cell_size : float
        minimum cell size for particles created using min_cell_size
    """
    self._solver = solver

    # let the solver contribute its own command line options before
    # the command line is parsed
    solver_opts = solver.get_options(self.opt_parse)
    if solver_opts is not None:
        self.add_option(solver_opts)
    self._process_command_line()
    options = self.options

    # choose the parallel load-balancing scheme when running on more
    # than one processor
    if self.num_procs > 1:
        if options.parallel_mode == 'simple':
            self.set_parallel_manager(SimpleParallelManager())
        if options.parallel_mode == "block":
            self.set_parallel_manager( SimpleBlockManager() )

    if create_particles:
        self._create_particles(variable_h, create_particles, min_cell_size,
                               **kwargs)

    pm = self._parallel_manager
    if pm is not None:
        self.particles.parallel_manager = pm
        pm.initialize(self.particles)

    # hand the parsed options (as a plain dict) to the solver
    self._solver.setup_solver(options.__dict__)

    dt = options.time_step
    if dt is not None:
        solver.set_time_step(dt)

    tf = options.final_time
    if tf is not None:
        solver.set_final_time(tf)

    #setup the solver output file name
    fname = options.output

    if HAS_MPI:
        comm = self.comm
        rank = self.rank

        # NOTE(review): the rank suffix is appended whenever
        # num_procs != 0, i.e. also for single-processor MPI runs --
        # confirm this is intended (vs. num_procs > 1)
        if not self.num_procs == 0:
            fname += '_' + str(rank)

    # set the rank for the solver
    solver.rank = self.rank
    solver.pid = self.rank
    solver.comm = self.comm

    # set the in parallel flag for the solver
    if self.num_procs > 1:
        solver.in_parallel = True

    # output file name
    solver.set_output_fname(fname)

    # output print frequency
    solver.set_print_freq(options.freq)

    # output printing level (default is not detailed)
    solver.set_output_printing_level(options.detailed_output)

    # output directory
    solver.set_output_directory(abspath(options.output_dir))

    # set parallel output mode
    solver.set_parallel_output_mode(options.parallel_output_mode)

    # default kernel: look the class up by name and instantiate it
    # with the solver's dimension
    if options.kernel is not None:
        solver.default_kernel = getattr(kernels,
                    kernels.kernel_names[options.kernel])(dim=solver.dim)

    # Hernquist and Katz kernel correction
    # TODO. Fix the Kernel and Gradient Correction
    #solver.set_kernel_correction(options.kernel_correction)

    # OpenCL setup for the solver
    solver.set_cl(options.with_cl)

    # resume support: when the requested dump is unavailable,
    # load_output returns the list of candidates, which is printed
    # before exiting
    if options.resume is not None:
        solver.particles = self.particles # needed to be able to load particles
        r = solver.load_output(options.resume)
        if r is not None:
            print 'available files for resume:'
            print r
            sys.exit(0)

    if options.integration is not None:
        solver.integrator_type =integration_methods[options.integration][1]

    # setup the solver
    solver.setup(self.particles)

    # print options for the solver
    #solver.set_arrays_to_print(options.arrays_to_print)

    # add solver interfaces
    self.command_manager = CommandManager(solver, self.comm)
    solver.set_command_handler(self.command_manager.execute_commands)

    # remote-control interfaces run on the root processor only
    if self.rank == 0:
        # commandline interface
        if options.cmd_line:
            from pysph.solver.solver_interfaces import CommandlineInterface
            self.command_manager.add_interface(CommandlineInterface().start)

        # XML-RPC interface; the address is "host:port" or ":port"
        # (host defaults to 0.0.0.0)
        if options.xml_rpc:
            from pysph.solver.solver_interfaces import XMLRPCInterface
            addr = options.xml_rpc
            idx = addr.find(':')
            host = "0.0.0.0" if idx == -1 else addr[:idx]
            port = int(addr[idx+1:])
            self.command_manager.add_interface(XMLRPCInterface((host,port)).start)

        # python MultiProcessing interface; the address format is
        # [authkey@]host:port[+], a trailing '+' meaning "try
        # successive ports until one is free"
        if options.multiproc:
            from pysph.solver.solver_interfaces import MultiprocessingInterface
            addr = options.multiproc
            idx = addr.find('@')
            authkey = "pysph" if idx == -1 else addr[:idx]
            addr = addr[idx+1:]
            idx = addr.find(':')
            host = "0.0.0.0" if idx == -1 else addr[:idx]
            port = addr[idx+1:]
            if port[-1] == '+':
                try_next_port = True
                port = port[:-1]
            else:
                try_next_port = False
            port = int(port)
            interface = MultiprocessingInterface((host,port), authkey,
                                                 try_next_port)
            self.command_manager.add_interface(interface.start)
            self.logger.info('started multiprocessing interface on %s'%(
                interface.address,))
def run(self):
    """Run the application by handing control to the solver's solve()."""
    verbose = not self.options.quiet
    self._solver.solve(verbose)
def set_parallel_manager(self, mgr):
    """Set the parallel manager class to use."""
    self._parallel_manager = mgr
    # the simple manager implies automatic work distribution
    if isinstance(mgr, SimpleParallelManager):
        self._distr_func = 'auto'
| Python |
""" A simple shock tube solver """
from optparse import OptionGroup, Option
import numpy
import pysph.base.api as base
import pysph.sph.api as sph
from solver import Solver
from sph_equation import SPHOperation, SPHIntegration
from integrator import GSPHIntegrator
# Shorthand aliases for the particle type tags exported by pysph.base
Fluids = base.Fluid
Solids = base.Solid
Boundary = base.Boundary
def standard_shock_tube_data(name="", type=0, cl_precision="double",
                             nl=320, nr=80, smoothing_length=None, **kwargs):
    """ Standard 400 particles shock tube problem

    nl equally-spaced particles fill [-0.6, 0) and nr coarser ones
    fill (0, 0.6]; all particles carry the same mass, the right state
    is four times less dense and the pressure/sound speed follow the
    ideal gas law with gamma = 1.4.
    """
    dx_left = 0.6/nl
    dx_right = dx_left*4
    npart = nl + nr

    # positions: fine spacing on the left, coarse on the right
    x = numpy.ones(npart)
    x[:nl] = numpy.arange(-0.6, -dx_left+1e-10, dx_left)
    x[nl:] = numpy.arange(dx_right, 0.6+1e-10, dx_right)

    # equal particle masses; smoothing length twice the coarse spacing
    # unless explicitly overridden
    m = numpy.ones_like(x)*dx_left
    h = numpy.ones_like(x)*2*dx_right
    if smoothing_length:
        h = numpy.ones_like(x) * smoothing_length

    # density jump across the diaphragm
    rho = numpy.ones_like(x)
    rho[nl:] = 0.25

    u = numpy.zeros_like(x)

    # specific internal energy for the left/right states
    e = numpy.ones_like(x)
    e[:nl] = 2.5
    e[nl:] = 1.795

    # ideal gas: p = (gamma - 1) * rho * e with gamma = 1.4
    p = 0.4*rho*e
    cs = numpy.sqrt(1.4*p/rho)

    idx = numpy.arange(npart)

    return base.get_particle_array(name=name, x=x, m=m, h=h, rho=rho,
                                   p=p, e=e, cs=cs, type=type, idx=idx,
                                   cl_precision=cl_precision)
############################################################################
# `ShockTubeSolver` class
############################################################################
class ShockTubeSolver(Solver):
    """Basic SPH solver for the Sod shock tube problem.

    The operation pipeline is: summation density, ideal gas EOS,
    momentum equation with Monaghan artificial viscosity, XSPH
    velocity correction, thermal energy equation and position
    stepping.
    """

    def __init__(self, dim, integrator_type, alpha=1.0, beta=1.0,
                 gamma=1.4, xsph_eps=0):
        """Constructor.

        Parameters
        ----------
        dim : int
            Problem dimensionality.
        integrator_type : class
            Integrator class to use (see solver/integrator.py).
        alpha, beta : float
            Artificial viscosity parameters.
        gamma : float
            Ratio of specific heats for the ideal gas EOS.
        xsph_eps : float
            XSPH smoothing constant (0 effectively disables it).
        """
        self.dim = dim

        # command line defaults for this solver
        self.defaults = dict(alpha=alpha,
                             beta=beta,
                             gamma=gamma,
                             xsph_eps=xsph_eps)

        # base class constructor
        Solver.__init__(self, dim, integrator_type)

    def get_options(self, opt_parser):
        """Return the OptionGroup holding this solver's options."""
        opt = OptionGroup(opt_parser, "ShockTubeSolver options")

        opt.add_option("--alpha", action="store", type="float",
                       dest="alpha", default=self.defaults["alpha"],
                       help="Set the artificial viscosity parameter alpha")

        # FIX: the default for beta previously read defaults["alpha"]
        opt.add_option("--beta", action="store", type="float",
                       dest="beta", default=self.defaults["beta"],
                       help="Set the artificial viscosity parameter beta")

        opt.add_option("--gamma", action="store", type="float",
                       dest="gamma", default=self.defaults["gamma"],
                       help="Set the ratio of specific heats gamma")

        opt.add_option("--xsph-eps", action="store", type="float",
                       dest="xsph_eps", default=self.defaults.get("xsph_eps"),
                       help="Constant for XSPH")

        return opt

    def setup_solver(self, options=None):
        """Add the SPH operations using the parsed *options*; falls
        back to the constructor defaults when *options* is None."""
        options = options or self.defaults

        gamma = options.get("gamma")
        alpha = options.get("alpha")
        beta = options.get("beta")
        hks = options.get("hks")
        xsph_eps = options.get("xsph_eps")

        vel_updates = ["u","v","w"][:self.dim]
        pos_updates = ["x","y","z"][:self.dim]

        ###################################################################
        # Add the operations
        ###################################################################
        # Summation density
        self.add_operation(SPHOperation(
            sph.SPHRho.withargs(hks=hks),
            on_types=[Fluids], from_types=[Fluids, base.Boundary],
            updates=['rho'], id='density')
        )
        # Equation of state
        self.add_operation(SPHOperation(
            sph.IdealGasEquation.withargs(gamma=gamma),
            on_types=[Fluids],
            updates=['p', 'cs'],
            id='eos')
        )
        # Momentum equation
        self.add_operation(SPHIntegration(
            sph.MomentumEquation.withargs(alpha=alpha, beta=beta, hks=hks),
            on_types=[Fluids], from_types=[Fluids, base.Boundary],
            updates=vel_updates,
            id='mom')
        )
        # XSPH correction : defaults to eps = 0
        self.add_operation(SPHIntegration(
            sph.XSPHCorrection.withargs(eps=xsph_eps),
            on_types=[base.Fluid,], from_types=[base.Boundary, base.Fluid],
            updates=vel_updates,
            id="xsph")
        )
        # Energy Equation
        self.add_operation(SPHIntegration(
            sph.EnergyEquation.withargs(hks=hks),
            from_types=[Fluids, base.Boundary],
            on_types=[Fluids], updates=['e'],
            id='enr')
        )
        # Position Step
        self.add_operation(SPHIntegration(
            sph.PositionStepping.withargs(),
            on_types=[Fluids,],
            updates=pos_updates,
            id="step")
        )
############################################################################
# `ADKEShockTubeSolver` class
############################################################################
class ADKEShockTubeSolver(Solver):
    """Shock tube solver with Adaptive Density Kernel Estimation.

    On top of the standard pipeline this solver computes a pilot
    density estimate, adapts the smoothing length from it, and adds a
    velocity-divergence based artificial heat conduction term.
    """

    def __init__(self, dim, integrator_type, h0, eps, k, g1, g2, alpha, beta,
                 gamma=1.4, xsph_eps=0,
                 kernel=base.CubicSplineKernel, hks=True):
        """Constructor.

        Parameters
        ----------
        dim : int
            Problem dimensionality.
        integrator_type : class
            Integrator class to use.
        h0, eps, k : float
            ADKE initial smoothing length, sensitivity and scale.
        g1, g2 : float
            Artificial heat conduction coefficients.
        alpha, beta : float
            Artificial viscosity parameters.
        gamma : float
            Ratio of specific heats.
        xsph_eps : float
            XSPH smoothing constant (0 effectively disables it).
        kernel : class
            SPH kernel class, instantiated with ``dim``.
        hks : bool
            Use the Hernquist and Katz kernel normalization.
        """
        # solver dimension
        self.dim = dim

        # Hernquist and Katz normalization
        self.hks = hks

        # the SPH kernel to use
        self.kernel = kernel(dim)

        # command line defaults for this solver
        self.defaults = dict(alpha=alpha,
                             beta=beta,
                             gamma=gamma,
                             adke_eps=eps,
                             adke_k=k,
                             adke_h0=h0,
                             g1=g1,
                             g2=g2,
                             xsph_eps=xsph_eps)

        # base class constructor
        Solver.__init__(self, dim, integrator_type)

    def get_options(self, opt_parser):
        """Return the OptionGroup holding this solver's options."""
        opt = OptionGroup(opt_parser, "ADKEShockTubeSolver options")

        opt.add_option("--alpha", action="store", type="float",
                       dest="alpha", default=self.defaults["alpha"],
                       help="Set the artificial viscosity parameter alpha")

        # FIX: the default for beta previously read defaults["alpha"]
        opt.add_option("--beta", action="store", type="float",
                       dest="beta", default=self.defaults["beta"],
                       help="Set the artificial viscosity parameter beta")

        opt.add_option("--gamma", action="store", type="float",
                       dest="gamma", default=self.defaults["gamma"],
                       help="Set the ratio of specific heats gamma")

        opt.add_option("--adke-eps", action="store", type="float",
                       dest="adke_eps", default=self.defaults.get("adke_eps"),
                       help="Sensitivity parameter eps for the ADKE pocedure")

        opt.add_option("--adke-k", action="store", type="float",
                       dest="adke_k", default=self.defaults.get("adke_k"),
                       help="Scaling parameter k for the ADKE pocedure")

        opt.add_option("--adke-h0", action="store", type="float",
                       dest="adke_h0", default=self.defaults.get("adke_h0"),
                       help="Initial smoothing length h0 for the ADKE pocedure")

        opt.add_option("--g1", action="store", type="float",
                       dest="g1", default=self.defaults.get("g1"),
                       help="Artificial heating term coefficient g1")

        opt.add_option("--g2", action="store", type="float",
                       dest="g2", default=self.defaults.get("g2"),
                       help="Artificial heating term coefficient g2")

        opt.add_option("--xsph-eps", action="store", type="float",
                       dest="xsph_eps", default=self.defaults.get("xsph_eps"),
                       help="Constant for XSPH")

        return opt

    def setup_solver(self, options=None):
        """Add the ADKE operation pipeline using the parsed *options*;
        falls back to the constructor defaults when *options* is None."""
        options = options or self.defaults

        hks = options.get("hks")

        # ADKE parameters
        h0 = options.get("adke_h0")
        eps = options.get("adke_eps")
        k = options.get("adke_k")

        # Artificial heat parameters
        g1 = options.get("g1")
        g2 = options.get("g2")

        # Artificial viscosity parameters
        alpha = options.get("alpha")
        beta = options.get("beta")
        gamma = options.get("gamma")

        xsph_eps = options.get("xsph_eps")

        vel_updates = ["u","v","w"][:self.dim]
        pos_updates = ["x","y","z"][:self.dim]

        ###################################################################
        # Add the operations
        ###################################################################
        # reset the smoothing length to h0
        self.add_operation(SPHOperation(
            sph.SetSmoothingLength.withargs(h0=h0),
            on_types=[base.Fluid,],
            updates=["h"],
            id="setsmoothing")
        )
        # pilot rho estimate
        self.add_operation(SPHOperation(
            sph.ADKEPilotRho.withargs(h0=h0),
            on_types=[base.Fluid,], from_types=[base.Fluid, base.Boundary],
            updates=['rhop'], id='adke_rho')
        )
        # smoothing length update
        self.add_operation(SPHOperation(
            sph.ADKESmoothingUpdate.withargs(h0=h0, k=k, eps=eps, hks=hks),
            on_types=[base.Fluid,],
            updates=['h'], id='adke')
        )
        # summation density
        self.add_operation(SPHOperation(
            sph.SPHRho.withargs(hks=hks),
            on_types=[base.Fluid,], from_types=[base.Fluid, base.Boundary],
            updates=['rho'], id='density')
        )
        # ideal gas equation
        self.add_operation(SPHOperation(
            sph.IdealGasEquation.withargs(gamma=gamma),
            on_types=[base.Fluid,], updates=['p', 'cs'], id='eos')
        )
        # velocity divergence
        self.add_operation(SPHOperation(
            sph.VelocityDivergence.withargs(hks=hks),
            on_types=[base.Fluid], from_types=[base.Fluid, base.Boundary],
            updates=['div'], id='vdivergence')
        )
        # conduction coefficient update
        self.add_operation(SPHOperation(
            sph.ADKEConductionCoeffUpdate.withargs(g1=g1, g2=g2),
            on_types=[base.Fluid],
            updates=['q'], id='qcoeff')
        )
        # momentum equation
        self.add_operation(SPHIntegration(
            sph.MomentumEquation.withargs(alpha=alpha, beta=beta, hks=hks),
            from_types=[base.Fluid, base.Boundary], on_types=[base.Fluid],
            updates=vel_updates, id='mom')
        )
        # XSPH correction : defaults to eps = 0
        self.add_operation(SPHIntegration(
            sph.XSPHCorrection.withargs(eps=xsph_eps, hks=hks),
            on_types=[base.Fluid,], from_types=[base.Boundary, base.Fluid],
            updates=vel_updates,
            id="xsph")
        )
        # energy equation
        self.add_operation(SPHIntegration(
            sph.EnergyEquation.withargs(hks=hks, alpha=alpha, beta=beta,
                                        gamma=gamma),
            on_types=[base.Fluid], from_types=[base.Fluid, base.Boundary],
            updates=['e'],
            id='enr')
        )
        # artificial heat
        self.add_operation(SPHIntegration(
            sph.ArtificialHeat.withargs(eta=0.1, hks=hks),
            on_types=[base.Fluid], from_types=[base.Fluid, base.Boundary],
            updates=['e'],
            id='aheat')
        )
        # position step
        self.add_operation(SPHIntegration(
            sph.PositionStepping.withargs(),
            on_types=[Fluids,],
            updates=pos_updates,
            id="step")
        )
############################################################################
# `MonaghanShockTubeSolver` class
############################################################################
class MonaghanShockTubeSolver(Solver):
    """Shock tube solver using Monaghan's signal based viscosity.

    Combines the ADKE smoothing length adaptation with a pressure
    gradient plus signal-viscosity momentum equation and a matching
    energy equation.  Density is computed either by summation or by
    integrating the continuity equation.
    """

    def __init__(self, dim, integrator_type, h0, eps, k,
                 beta=1.0, K=1.0, f=0.5, gamma=1.4,
                 xsph_eps=0.0, summation_density=True,
                 kernel=base.CubicSplineKernel, hks=True):
        """Constructor.

        Parameters
        ----------
        dim : int
            Problem dimensionality.
        integrator_type : class
            Integrator class to use.
        h0, eps, k : float
            ADKE initial smoothing length, sensitivity and scale.
        beta, K, f : float
            Signal viscosity constants.
        gamma : float
            Ratio of specific heats.
        xsph_eps : float
            XSPH smoothing constant (0 effectively disables it).
        summation_density : bool
            Use summation density (else integrate the density rate).
        kernel : class
            SPH kernel class, instantiated with ``dim``.
        hks : bool
            Use the Hernquist and Katz kernel normalization.
        """
        # set the solver dimension
        self.dim = dim

        # Hernquist and Katz normalization
        self.hks = hks

        # the SPH kernel to use
        self.kernel = kernel(dim)

        # set the defaults
        self.defaults = dict(gamma=gamma,
                             adke_eps=eps, adke_k=k, adke_h0=h0,
                             beta=beta, K=K, f=f,
                             xsph_eps=xsph_eps,
                             summation_density=summation_density)

        # base class constructor
        Solver.__init__(self, dim, integrator_type)

    def get_options(self, opt_parser):
        """Return the OptionGroup holding this solver's options."""
        opt = OptionGroup(opt_parser, "MonaghanShockTubeSolver options")

        # NOTE(review): with action="store_true" and a default of True
        # this flag cannot switch summation density off -- confirm intent
        opt.add_option("--sd", action="store_true",
                       dest="summation_density",
                       default=self.defaults["summation_density"],
                       help="Use summation density for the density equation")

        opt.add_option("--gamma", action="store", type="float",
                       dest="gamma", default=self.defaults["gamma"],
                       help="Set the ratio of specific heats gamma")

        opt.add_option("--adke-eps", action="store", type="float",
                       dest="adke_eps", default=self.defaults.get("adke_eps"),
                       help="Sensitivity parameter eps for the ADKE pocedure")

        opt.add_option("--adke-k", action="store", type="float",
                       dest="adke_k", default=self.defaults.get("adke_k"),
                       help="Scaling parameter k for the ADKE pocedure")

        opt.add_option("--adke-h0", action="store", type="float",
                       dest="adke_h0", default=self.defaults.get("adke_h0"),
                       help="Initial smoothing length h0 for the ADKE pocedure")

        opt.add_option("--beta", action="store", type="float",
                       dest="beta", default=self.defaults["beta"],
                       help="Constant 'beta' for the signal viscosity")

        # FIX: the default for f previously read defaults.get("beta")
        opt.add_option("--f", action="store", type="float",
                       dest="f", default=self.defaults.get("f"),
                       help="Constant 'f' for the signal viscosity")

        opt.add_option("--K", action="store", type="float",
                       dest="K", default=self.defaults.get("K"),
                       help="Constant 'K' for the signal viscosity")

        opt.add_option("--xsph-eps", action="store", type="float",
                       dest="xsph_eps", default=self.defaults.get("xsph_eps"),
                       help="Constant for XSPH")

        return opt

    def setup_solver(self, options=None):
        """Add the operation pipeline using the parsed *options*; falls
        back to the constructor defaults when *options* is None."""
        options = options or self.defaults

        hks = options.get("hks")

        # ADKE parameters
        h0 = options.get("adke_h0")
        eps = options.get("adke_eps")
        k = options.get("adke_k")

        # signal viscosity parameters
        beta = options.get("beta")
        K = options.get("K")
        f = options.get("f")
        gamma = options.get("gamma")

        # XSPH eps
        xsph_eps = options.get("xsph_eps")

        # summation density
        sd = options.get("summation_density")

        vel_updates = ["u","v","w"][:self.dim]
        pos_updates = ["x","y","z"][:self.dim]

        ###################################################################
        # Add the operations
        ###################################################################
        # reset the smoothing length to h0
        self.add_operation(SPHOperation(
            sph.SetSmoothingLength.withargs(h0=h0),
            on_types=[base.Fluid,],
            updates=["h"],
            id="setsmoothing")
        )
        # pilot rho estimate
        self.add_operation(SPHOperation(
            sph.ADKEPilotRho.withargs(h0=h0),
            on_types=[Fluids,], from_types=[Fluids, Boundary],
            updates=['rhop'], id='adke_rho')
        )
        # smoothing length update
        self.add_operation(SPHOperation(
            sph.ADKESmoothingUpdate.withargs(h0=h0, k=k, eps=eps, hks=hks),
            on_types=[Fluids,],
            updates=['h'], id='adke')
        )
        # summation density if requested
        if sd:
            self.add_operation(SPHOperation(
                sph.SPHRho.withargs(hks=hks),
                on_types=[base.Fluid,], from_types=[base.Fluid, base.Boundary],
                updates=["rho"],
                id="summation_density")
            )
        # ideal gas eos
        self.add_operation(SPHOperation(
            sph.IdealGasEquation.withargs(gamma=gamma),
            on_types=[base.Fluid],
            updates=['p', 'cs'],
            id='eos')
        )
        # density rate if not summation density
        if not sd:
            self.add_operation(SPHIntegration(
                sph.SPHDensityRate.withargs(hks=hks),
                on_types=[base.Fluid,], from_types=[base.Boundary, base.Fluid],
                updates=["rho"],
                id="densityrate")
            )
        # momentum equation pressure gradient
        self.add_operation(SPHIntegration(
            sph.SPHPressureGradient.withargs(hks=hks),
            on_types=[base.Fluid,], from_types=[base.Boundary, base.Fluid],
            updates=vel_updates,
            id="pgrad")
        )
        # momentum equation artificial viscosity
        self.add_operation(SPHIntegration(
            sph.MomentumEquationSignalBasedViscosity.withargs(beta=beta, K=K,
                                                              hks=hks),
            on_types=[base.Fluid,], from_types=[base.Boundary, base.Fluid],
            updates=vel_updates,
            id="visc")
        )
        # XSPH correction : defaults to eps = 0
        self.add_operation(SPHIntegration(
            sph.XSPHCorrection.withargs(eps=xsph_eps, hks=hks),
            on_types=[base.Fluid,], from_types=[base.Boundary, base.Fluid],
            updates=vel_updates,
            id="xsph")
        )
        # energy equation
        self.add_operation(SPHIntegration(
            sph.EnergyEquationWithSignalBasedViscosity.withargs(beta=beta,
                                                                K=K, f=f,
                                                                hks=hks),
            on_types=[base.Fluid,], from_types=[base.Boundary, base.Fluid],
            updates=["e"],
            id="energy")
        )
        # position step
        self.add_operation(SPHIntegration(
            sph.PositionStepping.withargs(),
            on_types=[Fluids,],
            updates=pos_updates,
            id="step")
        )
############################################################################
# `ShockTubeSolver` class
############################################################################
class GSPHShockTubeSolver(Solver):
    """Godunov SPH (GSPH) solver for the shock tube problem.

    Always integrates with GSPHIntegrator and defaults to a Gaussian
    kernel; the pipeline is summation density, ideal gas EOS
    (gamma = 1.4), GSPH momentum and energy equations and position
    stepping.
    """

    def __init__(self, dim, integrator_type=None):
        self.dim = dim

        # the integrator_type argument is accepted only for interface
        # compatibility; GSPH always uses its own integrator
        integrator_type = GSPHIntegrator

        # base class constructor
        Solver.__init__(self, dim, integrator_type)

        self.default_kernel = base.GaussianKernel(dim)

    def get_options(self, opt_parser):
        # no extra command line options for this solver; a None
        # return is tolerated by the application setup
        pass

    def setup_solver(self, options=None):
        """Add the GSPH operation pipeline (*options* is ignored)."""
        ###################################################################
        # Add the operations
        ###################################################################
        # no Hernquist-Katz normalization for GSPH
        hks=False

        vel_updates=["u","v","w"][:self.dim]
        pos_updates=["x","y","z"][:self.dim]

        # Summation density
        self.add_operation(SPHOperation(
            sph.SPHRho.withargs(hks=hks),
            on_types=[Fluids], from_types=[Fluids, base.Boundary],
            updates=['rho'], id = 'density')
        )
        # Equation of state
        self.add_operation(SPHOperation(
            sph.IdealGasEquation.withargs(gamma=1.4),
            on_types = [Fluids],
            updates=['p', 'cs'],
            id='eos')
        )
        # Momentum equation
        self.add_operation(SPHIntegration(
            sph.GSPHMomentumEquation.withargs(gamma=1.4),
            on_types=[Fluids], from_types=[Fluids, base.Boundary],
            updates=vel_updates,
            id='mom')
        )
        # Energy Equation
        self.add_operation(SPHIntegration(
            sph.GSPHEnergyEquation.withargs(hks=hks),
            from_types=[Fluids, base.Boundary],
            on_types=[Fluids], updates=['e'],
            id='enr')
        )
        # Position Step
        self.add_operation(SPHIntegration(
            sph.PositionStepping.withargs(),
            on_types=[Fluids,],
            updates=pos_updates,
            id="step")
        )
| Python |
""" An implementation of a general solver base class """
# PySPH imports
from pysph.base.particle_types import ParticleType
from pysph.base.carray import LongArray
from pysph.base.kernels import CubicSplineKernel
from pysph.base.particle_array import get_particle_array
from pysph.sph.kernel_correction import KernelCorrectionManager
from pysph.sph.sph_calc import SPHCalc, CLCalc
from pysph.sph.funcs.position_funcs import PositionStepping
from pysph.sph.funcs.xsph_funcs import XSPHCorrection
from sph_equation import SPHOperation, SPHIntegration
from integrator import EulerIntegrator
from cl_integrator import CLEulerIntegrator
from utils import PBar, savez_compressed, savez, load
from cl_utils import get_cl_devices, HAS_CL, create_some_context
from time_step_functions import TimeStep
# pyopencl is imported only when OpenCL support is available
if HAS_CL:
    import pyopencl as cl

import logging
# module level logger (the application's root logger)
logger = logging.getLogger()

import os
import sys

import numpy

# shorthand for the fluid particle type tag
Fluids = ParticleType.Fluid
class Solver(object):
""" Base class for all PySPH Solvers
**Attributes**
- particles -- the particle arrays to operate on
- integrator_type -- the class of the integrator. This may be one of any
defined in solver/integrator.py
- kernel -- the kernel to be used throughout the calculations. This may
need to be modified to handle several kernels.
- operation_dict -- an internal structure indexing the operation id and
the corresponding operation as a dictionary
- order -- a list of strings specifying the order of an SPH simulation.
- t -- the internal time step counter
- pre_step_functions -- a list of functions to be performed before stepping
- post_step_functions -- a list of functions to execute after stepping
- pfreq -- the output print frequency
- dim -- the dimension of the problem
- kernel_correction -- flag to indicate type of kernel correction.
Defaults to -1 for no correction
- pid -- the processor id if running in parallel
- eps -- the epsilon value to use for XSPH stepping.
Defaults to -1 for no XSPH
"""
def __init__(self, dim, integrator_type):
    """Construct a solver for a *dim* dimensional problem stepped with
    *integrator_type* (see solver/integrator.py)."""
    self.dim = dim
    self.integrator_type = integrator_type

    # XSPH disabled by default
    self.eps = -1

    # map serial integrators to their OpenCL counterparts
    self.cl_integrator_types = {EulerIntegrator: CLEulerIntegrator}

    self.initialize()
def initialize(self):
    """Reset the solver to its default configuration."""
    # no particles until setup() is called
    self.particles = None

    # default SPH kernel
    self.default_kernel = CubicSplineKernel(dim=self.dim)

    # flag to use OpenCL
    self.with_cl = False

    # operation bookkeeping: id -> SPHOperation plus evaluation order
    self.operation_dict = {}
    self.order = []

    # simulation time and iteration count
    self.t = 0
    self.count = 0

    self.execute_commands = None

    # hooks run before/after every integration step
    self.pre_step_functions = []
    self.post_step_functions = []

    # output print frequency
    self.pfreq = 100

    # kernel correction type (-1 means no correction)
    self.kernel_correction = -1

    # parallel run defaults: no pid, root rank, serial mode
    self.pid = None
    self.rank = 0
    self.in_parallel = False
    self.parallel_output_mode = "collected"

    # dynamic time step computation
    self.time_step_function = TimeStep()

    # output configuration: arrays, per-dimension properties, detail
    self.arrays_to_print = []
    self.print_properties = ['x','u','m','h','p','e','rho',]
    if self.dim > 1:
        self.print_properties.extend(['y','v'])
    if self.dim > 2:
        self.print_properties.extend(['z','w'])
    self.detailed_output = False

    # output file name and directory derived from the class name
    self.fname = self.__class__.__name__
    self.output_directory = self.fname+'_output'
def switch_integrator(self, integrator_type):
""" Change the integrator for the solver """
if self.particles == None:
raise RuntimeError, "There are no particles!"
if self.with_cl:
self.integrator_type = self.cl_integrator_types[integrator_type]
else:
self.integrator_type = integrator_type
self.setup(self.particles)
def add_operation_step(self, types, xsph=False, eps=0.5):
    """ Specify an acceptable list of types to step

    Parameters
    ----------
    types : a list of acceptable types eg Fluid, Solid
        (the types are defined in base/particle_types.py)
    xsph, eps : accepted for interface compatibility; not used here.
    """
    self.add_operation(SPHIntegration(
        PositionStepping,
        on_types=types,
        updates=['x','y','z'][:self.dim],
        id='step',
        kernel=None)
    )
def add_operation_xsph(self, eps, hks=False):
""" Set the XSPH operation if requested
Parameters
----------
eps : the epsilon value to use for XSPH stepping
Notes
-----
The position stepping operation must be defined. This is because
the XSPH operation is setup for those arrays that need stepping.
The smoothing kernel used for this operation is the CubicSpline
"""
assert eps > 0, 'Invalid value for XSPH epsilon: %f' %(eps)
self.eps = eps
# create the xsph stepping operation
id = 'xsph'
err = "position stepping function does not exist!"
assert self.operation_dict.has_key('step'), err
types = self.operation_dict['step'].on_types
updates = self.operation_dict['step'].updates
self.add_operation(SPHIntegration(
XSPHCorrection.withargs(eps=eps, hks=hks), from_types=types,
on_types=types, updates=updates, id=id, kernel=self.default_kernel)
)
def add_operation(self, operation, before=False, id=None):
""" Add an SPH operation to the solver.
Parameters
----------
operation : the operation (:class:`SPHOperation`) to add
before : flag to indicate insertion before an id. Defaults to False
id : The id where to insert the operation. Defaults to None
Notes
-----
An SPH operation typically represents a single equation written
in SPH form. SPHOperation is defined in solver/sph_equation.py
The id for the operation must be unique. An error is raised if an
operation with the same id exists.
Similarly, an error is raised if an invalid 'id' is provided
as an argument.
Examples
--------
(1)
>>> solver.add_operation(operation)
This appends an operation to the existing list.
(2)
>>> solver.add_operation(operation, before=False, id=someid)
Add an operation after an existing operation with id 'someid'
(3)
>>> solver.add_operation(operation, before=True, id=soleid)
Add an operation before the operation with id 'someid'
"""
err = 'Operation %s exists!'%(operation.id)
assert operation.id not in self.order, err
assert operation.id not in self.operation_dict.keys()
self.operation_dict[operation.id] = operation
if id:
msg = 'The specified operation doesnt exist'
assert self.operation_dict.has_key(id), msg + ' in the calcs dict!'
assert id in self.order, msg + ' in the order list!'
if before:
self.order.insert(self.order.index(id), operation.id)
else:
self.order.insert(self.order.index(id)+1, operation.id)
else:
self.order.append(operation.id)
def replace_operation(self, id, operation):
""" Replace an operation.
Parameters
----------
id : the operation with id to replace
operation : The replacement operation
Notes
-----
The id to replace is taken from the provided operation.
An error is raised if the provided operation does not exist.
"""
msg = 'The specified operation doesnt exist'
assert self.operation_dict.has_key(id), msg + ' in the op dict!'
assert id in self.order, msg + ' in the order list!'
self.operation_dict.pop(id)
self.operation_dict[operation.id] = operation
idx = self.order.index(id)
self.order.insert(idx, operation.id)
self.order.remove(id)
def remove_operation(self, id_or_operation):
""" Remove an operation with id
Parameters
----------
id_or_operation : the operation to remove
Notes
-----
Remove an operation with either the operation object or the
operation id.
An error is raised if the operation is invalid.
"""
if type(id_or_operation) == str:
id = id_or_operation
else:
id = id_or_operation.id
assert id in self.operation_dict.keys(), 'id doesnt exist!'
assert id in self.order, 'id doesnt exist!'
self.order.remove(id)
self.operation_dict.pop(id)
def set_order(self, order):
""" Install a new order
The order determines the manner in which the operations are
executed by the integrator.
The new order and existing order should match else, an error is raised
"""
for equation_id in order:
msg = '%s in order list does not exist!'%(equation_id)
assert equation_id in self.order, msg
assert equation_id in self.operation_dict.keys(), msg
self.order = order
def setup_position_step(self):
    """ Setup the position stepping for the solver

    Placeholder: the base implementation does nothing; subclasses
    may override.
    """
    pass
def setup(self, particles=None):
    """ Setup the solver.

    The order of the integrating calcs is determined by the solver's
    order attribute.

    This is usually called at the start of a PySPH simulation.

    By default, the kernel correction manager is set for all the calcs.
    """
    # nothing happens when no particles are supplied
    if particles:
        self.particles = particles
        self.particles.kernel = self.default_kernel

        # instantiate the Integrator
        self.integrator = self.integrator_type(particles, calcs=[])

        # setup the SPHCalc objects for the integrator, in order;
        # operations without an explicit kernel get the default one
        for equation_id in self.order:
            operation = self.operation_dict[equation_id]
            if operation.kernel is None:
                operation.kernel = self.default_kernel
            calcs = operation.get_calcs(particles, operation.kernel)
            self.integrator.calcs.extend(calcs)

        # OpenCL runs need the device context for setup
        if self.with_cl:
            self.integrator.setup_integrator(self.cl_context)
        else:
            self.integrator.setup_integrator()

        # Setup the kernel correction manager for each calc
        calcs = self.integrator.calcs
        particles.correction_manager = KernelCorrectionManager(
            calcs, self.kernel_correction)
def add_print_properties(self, props):
""" Add a list of properties to print """
for prop in props:
if not prop in self.print_properties:
self.print_properties.append(prop)
def append_particle_arrrays(self, arrays):
""" Append the particle arrays to the existing particle arrays """
if not self.particles:
print 'Warning!, particles not defined'
return
for array in self.particles.arrays:
array_name = array.name
for arr in arrays:
if array_name == arr.name:
array.append_parray(arr)
self.setup(self.particles)
def set_final_time(self, tf):
""" Set the final time for the simulation """
self.tf = tf
def set_time_step(self, dt):
""" Set the time step to use """
self.dt = dt
def set_print_freq(self, n):
""" Set the output print frequency """
self.pfreq = n
def set_arrays_to_print(self, array_names=None):
available_arrays = [array.name for array in self.particles.arrays]
if array_names:
for name in array_names:
if not name in available_arrays:
raise RuntimeError("Array %s not availabe"%(name))
array = self.particles.get_named_particle_array(name)
self.arrays_to_print.append(array)
else:
self.arrays_to_print = self.particles.arrays
def set_output_fname(self, fname):
""" Set the output file name """
self.fname = fname
def set_output_printing_level(self, detailed_output):
""" Set the output printing level """
self.detailed_output = detailed_output
def set_output_directory(self, path):
""" Set the output directory """
self.output_directory = path
def set_kernel_correction(self, kernel_correction):
""" Set the kernel correction manager for each calc """
self.kernel_correction = kernel_correction
for id in self.operation_dict:
self.operation_dict[id].kernel_correction=kernel_correction
def set_parallel_output_mode(self, mode="collected"):
"""Set the default solver dump mode in parallel.
The available modes are:
collected : Collect array data from all processors on root and
dump a single file.
distributed : Each processor dumps a file locally.
"""
self.parallel_output_mode = mode
def set_cl(self, with_cl=False):
""" Set the flag to use OpenCL
This option must be set after all operations are created so that
we may switch the default SPHCalcs to CLCalcs.
The solver must also setup an appropriate context which is used
to setup the ParticleArrays on the device.
"""
self.with_cl = with_cl
if with_cl:
if not HAS_CL:
raise RuntimeWarning, "PyOpenCL not found!"
for equation_id in self.order:
operation = self.operation_dict[equation_id]
# set the type of calc to use for the operation
operation.calc_type = CLCalc
# HACK. THE ONLY CL INTEGRATOR IS EULERINTEGRATOR
#self.integrator_type = self.cl_integrator_types[
# self.integrator_type]
self.integrator_type = CLEulerIntegrator
# Setup the OpenCL context
self.setup_cl()
def set_command_handler(self, callable, command_interval=1):
""" set the `callable` to be called at every `command_interval` iteration
the `callable` is called with the solver instance as an argument
"""
self.execute_commands = callable
self.command_interval = command_interval
def solve(self, show_progress=False):
    """ Solve the system

    Notes
    -----
    Pre-stepping functions are those that need to be called before
    the integrator is called.

    Similarly, post step functions are those that are called after
    the stepping within the integrator.
    """
    dt = self.dt

    # progress bar: 1000 slices of the remaining simulation time
    bt = (self.tf - self.t)/1000.0
    bcount = 0.0
    bar = PBar(1001, show=show_progress)

    # dump the initial state
    self.dump_output(dt, *self.print_properties)

    # set the time for the integrator
    self.integrator.time = self.t

    while self.t < self.tf:
        self.t += dt
        self.count += 1

        # update the particles explicitly
        self.particles.update()

        # perform any pre step functions
        for func in self.pre_step_functions:
            func.eval(self)

        # compute the local time step (fixed dt is kept under OpenCL)
        if not self.with_cl:
            dt = self.time_step_function.compute_time_step(self)

            # compute the global time step
            dt = self.compute_global_time_step(dt)

        logger.info("Time %f, time step %f, rank %d"%(self.t, dt,
                                                      self.rank))

        # perform the integration and update the time
        self.integrator.integrate(dt)
        self.integrator.time += dt

        # update the time for all arrays
        self.update_particle_time()

        # perform any post step functions
        for func in self.post_step_functions:
            func.eval(self)

        # dump output
        if self.count % self.pfreq == 0:
            self.dump_output(dt, *self.print_properties)

        # NOTE(review): the progress bar advances by self.dt (the
        # initial step), not the possibly adapted local dt -- confirm
        bcount += self.dt/bt
        while bcount > 0:
            bar.update()
            bcount -= 1

        if self.execute_commands is not None:
            if self.count % self.command_interval == 0:
                self.execute_commands(self)

    bar.finish()
def update_particle_time(self):
    """Propagate the solver's current time to every particle array."""
    current_time = self.t
    for parray in self.particles.arrays:
        parray.set_time(current_time)
def compute_global_time_step(self, dt):
    """Return the time step to use across all processors.

    Serial runs get the local ``dt`` back unchanged; parallel runs use
    the global minimum of ``dt`` over all ranks.
    """
    if not self.particles.in_parallel:
        return dt
    glb_min, glb_max = self.particles.get_global_min_max({'dt': dt})
    return glb_min['dt']
def dump_output(self, dt, *print_properties):
    """ Print output based on level of detail required

    The default detail level (low) is the integrator's calc's update
    property for each named particle array.
    The higher detail level dumps all particle array properties.

    Format:
    -------
    A single file named as: <fname>_<rank>_<count>.npz
    The output file contains the following fields:
    solver_data : Solver related data like time step, time and
    iteration count. These are used to resume a simulation.
    arrays : A dictionary keyed on particle array names and with
    particle properties as value.
    version : The version number for this format of file
    output. The current version number is 1

    Example:
    --------
    data = load('foo.npz')
    version = data['version']
    dt = data['solver_data']['dt']
    t = data['solver_data']['t']
    array = data['arrays'][array_name].astype(object)
    array['x']
    """
    # on OpenCL runs the particle data lives on the device; copy it back
    # to host memory before writing
    if self.with_cl:
        self.particles.read_from_buffer()
    fname = self.fname + '_'
    props = {"arrays":{}, "solver_data":{}}
    # NOTE(review): cell_size is computed but never used below -- confirm
    # whether it was meant to be saved with the solver data.
    cell_size = None
    if not self.with_cl:
        cell_size = self.particles.cell_manager.cell_size
    _fname = os.path.join(self.output_directory,
                          fname + str(self.count) +'.npz')
    # collect either all properties or just the default set per array
    if self.detailed_output:
        for array in self.particles.arrays:
            props["arrays"][array.name]=array.get_property_arrays(all=True)
    else:
        for array in self.particles.arrays:
            props["arrays"][array.name]=array.get_property_arrays(all=False)
    # Add the solver data
    props["solver_data"]["dt"] = dt
    props["solver_data"]["t"] = self.t
    props["solver_data"]["count"] = self.count
    if self.parallel_output_mode == "collected" and self.in_parallel:
        comm = self.comm
        arrays = props["arrays"]
        numarrays = len(arrays)
        array_names = arrays.keys()
        # gather the data from all processors
        collected_data = comm.gather(arrays, root=0)
        # only rank 0 concatenates and writes the collected file
        if self.rank == 0:
            props["arrays"] = {}
            size = comm.Get_size()
            # concatenate the arrays
            for array_name in array_names:
                props["arrays"][array_name] = {}
                _props = collected_data[0][array_name].keys()
                for prop in _props:
                    prop_arr = numpy.concatenate( [collected_data[pid][array_name][prop] for pid in range(size)] )
                    props["arrays"][array_name][prop] = prop_arr
            savez(_fname, version=1, **props)
    else:
        # distributed (or serial) output: every rank writes its own file
        savez(_fname, version=1, **props)
def load_output(self, count):
    """ Load particle data from dumped output file.

    Parameters
    ----------
    count : string
        The iteration time from which to load the data. If time is
        '?' then list of available data files is returned else
        the latest available data file is used

    Raises
    ------
    IOError
        If no dump exists for the requested iteration count.

    Notes
    -----
    Data is loaded from the :py:attr:`output_directory` using the same format
    as stored by the :py:meth:`dump_output` method.
    Proper functioning required that all the relevant properties of arrays be
    dumped
    """
    # get the list of available files
    # file names look like <fname>_<count>.npz: strip the prefix up to the
    # last '_' and the '.npz' suffix to recover the iteration counts
    available_files = [i.rsplit('_',1)[1][:-4] for i in os.listdir(self.output_directory) if i.startswith(self.fname) and i.endswith('.npz')]
    if count == '?':
        # caller only wants to know which dumps exist
        return sorted(set(available_files), key=int)
    else:
        if not count in available_files:
            msg = """File with iteration count `%s` does not exist"""%(count)
            msg += "\nValid iteration counts are %s"%(sorted(set(available_files), key=int))
            #print msg
            raise IOError(msg)
    array_names = [pa.name for pa in self.particles.arrays]
    # load the output file
    data = load(os.path.join(self.output_directory,
                             self.fname+'_'+str(count)+'.npz'))
    # NOTE(review): these entries are the raw stored dictionaries, not
    # ParticleArray instances -- confirm particles.initialize() accepts
    # them in this form.
    arrays = [ data["arrays"][i] for i in array_names ]
    # set the Particle's arrays
    self.particles.arrays = arrays
    # call the particle's initialize
    self.particles.initialize()
    # restore the solver state so the run can be resumed
    self.t = float(data["solver_data"]['t'])
    self.count = int(data["solver_data"]['count'])
def setup_cl(self):
    """Create an OpenCL context on the first available platform.

    Does nothing when PyOpenCL is not installed (HAS_CL is False).
    """
    if HAS_CL:
        devices = cl.get_platforms()[0].get_devices()
        self.cl_context = cl.Context(devices)
def get_options(self, opt_parser):
    """Hook for subclasses to register extra command-line options.

    The base implementation intentionally does nothing.
    """
    return None
def setup_solver(self, options=None):
    """Hook where subclasses assemble the solver's SPH operations.

    Subclasses of Solver may override this to add the operations needed
    for the problem at hand; see solver/fluid_solver.py for an example.

    Parameters
    ----------
    options : dict
        Options set by the user on the command line (no key is
        guaranteed to be present).
    """
    return None
############################################################################
| Python |
"""
Classes for generators of some simple elements.
"""
# standard imports
import logging
logger = logging.getLogger()
import numpy
# local imports
from pysph.base.carray import DoubleArray, LongArray
from pysph.base.nnps import *
from pysph.base.point import Point
from pysph.solver.particle_generator import *
from pysph.solver.particle_generator import MassComputationMode as MCM
from pysph.solver.particle_generator import DensityComputationMode as DCM
###############################################################################
# `compute_particle_mass` function.
###############################################################################
def compute_particle_mass(parray, kernel, density=1000.0, h=0.1, dim=3):
    """
    Given a particle array, kernel, target density and interaction radius, find
    the mass of each particle.

    The mass is chosen so that the SPH summation density at the centroid of
    the configuration equals the requested density:
    ``m = density / sum_j W(centroid, x_j, h)``.

    Parameters
    ----------
    parray : ParticleArray
        Particles whose coordinates define the configuration.
    kernel : kernel object
        SPH kernel providing ``radius()`` and ``py_function``.
    density : float
        Target (reference) density.
    h : float
        Smoothing length for the kernel evaluation.
    dim : int
        Spatial dimension (1, 2 or 3); unused coordinates are zeros.

    Note that this method works only when the particle radius is constant. This
    may also compute incorrect values when the particle cofiguration has voids
    within.
    """
    centroid = Point(0, 0, 0)
    dist = DoubleArray(0)
    indices = LongArray(0)
    x = parray.get('x')
    centroid.x = numpy.sum(x)/float(len(x))
    y = None
    z = None
    logger.debug('particles to compute_particle_mass %d'%(len(x)))
    if dim > 1:
        y = parray.get('y')
        centroid.y = numpy.sum(y)/float(len(y))
        if dim > 2:
            z = parray.get('z')
            centroid.z = numpy.sum(z)/float(len(z))
        else:
            # numpy.float was a deprecated alias of the builtin float and
            # was removed in numpy >= 1.20; use float directly.
            z = numpy.zeros(len(x), dtype=float)
    else:
        y = numpy.zeros(len(x), dtype=float)
        z = y
    logger.debug('Centroid : %s'%(centroid))
    radius = kernel.radius()
    # find the nearest points in parray of the centroid.
    brute_force_nnps(pnt=centroid, search_radius=h*radius,
                     xa=x, ya=y, za=z,
                     neighbor_indices=indices,
                     neighbor_distances=dist)
    # accumulate the kernel summation over the centroid's neighbors
    k = 0.0
    logger.info('Number of neighbors : %d'%(indices.length))
    pnt = Point()
    for i in range(indices.length):
        pnt.x = x[indices[i]]
        pnt.y = y[indices[i]]
        pnt.z = z[indices[i]]
        k += kernel.py_function(centroid, pnt, h)
    logger.info('Kernel sum : %f'%(k))
    logger.info('Requested density : %f'%(density))
    m = float(density/k)
    logger.info('Computed mass : %f'%(m))
    return m
###############################################################################
# `find_best_particle_spacing' function.
###############################################################################
def find_best_particle_spacing(length=1.0,
                               initial_spacing=0.1,
                               end_points_exact=True,
                               tolerance=1e-09):
    """
    Given the length and initial_spacing return a (possibly) corrected
    particle spacing and the number of points.

    Parameters
    ----------
    length : float
        Extent to fill with particles.
    initial_spacing : float
        Requested inter-particle spacing.
    end_points_exact : bool
        When True, stretch the spacing so a particle lands exactly on
        both end points.
    tolerance : float
        Lengths at or below this are treated as degenerate (0 points).

    Returns
    -------
    (spacing, num_points) : tuple of (float, int)
    """
    if length <= tolerance:
        # degenerate extent: keep the requested spacing, no points
        return initial_spacing, 0
    n_intervals = int(numpy.floor(length/initial_spacing))
    if end_points_exact:
        # BUGFIX: when the requested spacing exceeds the length there are
        # zero whole intervals and the stretch below divided by zero.
        # Place one particle at each end point instead.
        if n_intervals == 0:
            return length, 2
        # distribute the remainder evenly so the last particle falls
        # exactly on the end point
        r = length - n_intervals*initial_spacing
        new_spacing = initial_spacing + float(r/n_intervals)
    else:
        new_spacing = initial_spacing
        r = length - n_intervals*initial_spacing
        # add one interval if that leaves a smaller leftover
        if r > numpy.fabs(length - ((n_intervals+1)*initial_spacing)):
            n_intervals += 1
    return new_spacing, (n_intervals+1)
###############################################################################
# `LineGenerator` class.
###############################################################################
class LineGenerator(ParticleGenerator):
    """
    Generate a line of points between start_point and end_point.
    """
    def __init__(self,
                 output_particle_arrays=[],
                 particle_mass=-1.0,
                 mass_computation_mode=MCM.Compute_From_Density,
                 particle_density=1000.0,
                 density_computation_mode=DCM.Set_Constant,
                 particle_h=0.1,
                 kernel=None,
                 start_point=Point(0, 0, 0),
                 end_point=Point(0, 0, 1),
                 particle_spacing=0.05,
                 end_points_exact=True,
                 tolerance=1e-09,
                 *args, **kwargs):
        """
        Constructor.
        """
        # BUGFIX: initialize the common generator state.  This call was
        # previously missing, so attributes used later by generate_func
        # (particle_h, mass_computation_mode, particle_density, kernel,
        # density_computation_mode, ...) were never set on instances.
        # TODO(review): output_particle_arrays is accepted but not
        # forwarded -- confirm whether the base class should receive it.
        ParticleGenerator.__init__(
            self,
            particle_mass=particle_mass,
            mass_computation_mode=mass_computation_mode,
            particle_density=particle_density,
            density_computation_mode=density_computation_mode,
            particle_h=particle_h,
            kernel=kernel)
        # copy the end points so mutating the caller's Points later
        # cannot change this generator
        self.start_point = Point(start_point.x,
                                 start_point.y,
                                 start_point.z)
        self.end_point = Point(end_point.x,
                               end_point.y,
                               end_point.z)
        self.particle_spacing = particle_spacing
        self.end_points_exact = end_points_exact
        self.tolerance = tolerance
    def get_coords(self):
        """
        Returns 3 numpy arrays representing the coordinates of the generated
        points.
        """
        direction = self.end_point - self.start_point
        distance = direction.length()
        # degenerate line: no points at all
        if distance <= self.tolerance:
            x = numpy.asarray([], dtype=float)
            y = numpy.asarray([], dtype=float)
            z = numpy.asarray([], dtype=float)
            return x, y, z
        normal = direction/distance
        new_spacing, np = find_best_particle_spacing(
            length=distance,
            initial_spacing=self.particle_spacing,
            end_points_exact=self.end_points_exact,
            tolerance=self.tolerance)
        x = numpy.zeros(np, dtype=float)
        y = numpy.zeros(np, dtype=float)
        z = numpy.zeros(np, dtype=float)
        # walk along the unit direction vector placing equi-spaced points
        for i in range(np):
            x[i] = self.start_point.x + i*normal.x*new_spacing
            y[i] = self.start_point.y + i*normal.y*new_spacing
            z[i] = self.start_point.z + i*normal.z*new_spacing
        return x, y, z
    def validate_setup(self):
        """
        Validate the generator configuration (delegates to the base class).
        """
        return ParticleGenerator.validate_setup(self)
    def generate_func(self):
        """
        Generate a complete particle array with the required properties
        computed.
        """
        # setup the output particle array as required.
        self._setup_outputs()
        # find the coordinates
        x, y, z = self.get_coords()
        # add the generated particles to the output particle array
        output = self.output_particle_arrays[0]
        output.add_particles(x=x, y=y, z=z)
        # check if 'h' has to be set.
        if self.particle_h > 0.:
            output.h[:] = self.particle_h
        # check if density has to be set.
        if self.density_computation_mode == DCM.Set_Constant:
            output.rho[:] = self.particle_density
        # check if mass has to be set.
        if self.mass_computation_mode == MCM.Set_Constant:
            output.m[:] = self.particle_mass
        elif self.mass_computation_mode == MCM.Compute_From_Density:
            m = compute_particle_mass(density=self.particle_density,
                                      h=self.particle_h,
                                      parray=output,
                                      kernel=self.kernel,
                                      dim=3)
            output.m[:] = m
    def num_output_arrays(self):
        """
        Return the number of output particles arrays this generator will be
        generating.
        """
        return 1
###############################################################################
# `RectangleGenerator` class.
###############################################################################
class RectangleGenerator(ParticleGenerator):
    """
    Class to generate rectangles of particles - filled and hollow.

    The two input points must differ in exactly two coordinates; the
    rectangle lies in the plane spanned by those two axes.
    """
    def __init__(self,
                 input_particle_arrays=[],
                 particle_mass=-1.0,
                 mass_computation_mode=MCM.Compute_From_Density,
                 particle_density=1000.0,
                 density_computation_mode=DCM.Set_Constant,
                 particle_h=0.1,
                 kernel=None,
                 filled=True,
                 start_point=Point(0, 0, 0),
                 end_point=Point(1, 1, 0),
                 particle_spacing_x1=0.1,
                 particle_spacing_x2=0.1,
                 end_points_exact=True,
                 tolerance=1e-09,
                 *args, **kwargs):
        """
        Constructor.
        """
        ParticleGenerator.__init__(self,
                                   input_particle_arrays=input_particle_arrays,
                                   particle_mass=particle_mass,
                                   mass_computation_mode=mass_computation_mode,
                                   particle_density=particle_density,
                                   density_computation_mode=density_computation_mode,
                                   particle_h=particle_h,
                                   kernel=kernel)
        self.filled = filled
        # copy the points so the caller's Point objects are not aliased
        self.start_point = Point(start_point.x, start_point.y, start_point.z)
        self.end_point = Point(end_point.x, end_point.y, end_point.z)
        self.particle_spacing_x1 = particle_spacing_x1
        self.particle_spacing_x2 = particle_spacing_x2
        self.end_points_exact = end_points_exact
        self.tolerance = tolerance
    def num_output_arrays(self):
        """
        Return the number of particle arrays generated (always 1).
        """
        return 1
    def validate_setup(self):
        """
        Make sure the input is valid.
        """
        if ParticleGenerator.validate_setup(self) == False:
            return False
        return self._validate_input_points()
    def _validate_input_points(self):
        """
        Make sure a proper rectangle has been requested by the input points.

        The points must differ in exactly two of the three coordinates.
        """
        # 'direction' flags which axes the two points differ along
        direction = [0, 0, 0]
        if self.start_point.x != self.end_point.x:
            direction[0] = 1
        if self.start_point.y != self.end_point.y:
            direction[1] = 1
        if self.start_point.z != self.end_point.z:
            direction[2] = 1
        if sum(direction) != 2:
            msg = 'Incorrect input points specified'
            msg += '\n'
            msg += str(self.start_point)+' , '+str(self.end_point)
            logger.error(msg)
            return False
        return True
    def get_coords(self):
        """
        Return 3 numpy arrays with the coordinates of the generated points,
        or None if the input points do not describe a rectangle.
        """
        # based on the input points, decide which is the plane this rectangle is
        # going to lie on.
        if self._validate_input_points() is False:
            return None
        direction = [0, 0, 0]
        if self.start_point.x != self.end_point.x:
            direction[0] = 1
        if self.start_point.y != self.end_point.y:
            direction[1] = 1
        if self.start_point.z != self.end_point.z:
            direction[2] = 1
        if direction[0] == 1:
            if direction[1] == 1:
                x, y, z = self._generate_x_y_rectangle()
            else:
                x, y, z = self._generate_x_z_rectangle()
        else:
            x, y, z = self._generate_y_z_rectangle()
        return x, y, z
    def _generate_x_y_rectangle(self):
        """
        Generate a rectangle in the x-y plane.
        """
        # order the extents so start < end along each axis
        if self.start_point.x < self.end_point.x:
            start_x1 = self.start_point.x
            end_x1 = self.end_point.x
        else:
            start_x1 = self.end_point.x
            end_x1 = self.start_point.x
        if self.start_point.y < self.end_point.y:
            start_x2 = self.start_point.y
            end_x2 = self.end_point.y
        else:
            start_x2 = self.end_point.y
            end_x2 = self.start_point.y
        spacing1 = self.particle_spacing_x1
        spacing2 = self.particle_spacing_x2
        x, y = self.generate_rectangle_coords(start_x1=start_x1,
                                              start_x2=start_x2,
                                              end_x1=end_x1,
                                              end_x2=end_x2,
                                              spacing1=spacing1,
                                              spacing2=spacing2)
        z = numpy.zeros(len(x))
        return x, y, z
    def _generate_x_z_rectangle(self):
        """
        Generate a rectangle in the x-z plane.
        """
        if self.start_point.x < self.end_point.x:
            start_x1 = self.start_point.x
            end_x1 = self.end_point.x
        else:
            start_x1 = self.end_point.x
            end_x1 = self.start_point.x
        if self.start_point.z < self.end_point.z:
            start_x2 = self.start_point.z
            end_x2 = self.end_point.z
        else:
            start_x2 = self.end_point.z
            end_x2 = self.start_point.z
        spacing1 = self.particle_spacing_x1
        spacing2 = self.particle_spacing_x2
        x, z = self.generate_rectangle_coords(start_x1=start_x1,
                                              start_x2=start_x2,
                                              end_x1=end_x1,
                                              end_x2=end_x2,
                                              spacing1=spacing1,
                                              spacing2=spacing2)
        y = numpy.zeros(len(x))
        return x, y, z
    def _generate_y_z_rectangle(self):
        """
        Generate a rectangle in the y-z plane.
        """
        if self.start_point.y < self.end_point.y:
            start_x1 = self.start_point.y
            end_x1 = self.end_point.y
        else:
            start_x1 = self.end_point.y
            end_x1 = self.start_point.y
        if self.start_point.z < self.end_point.z:
            start_x2 = self.start_point.z
            end_x2 = self.end_point.z
        else:
            start_x2 = self.end_point.z
            end_x2 = self.start_point.z
        spacing1 = self.particle_spacing_x1
        spacing2 = self.particle_spacing_x2
        y, z = self.generate_rectangle_coords(start_x1=start_x1,
                                              start_x2=start_x2,
                                              end_x1=end_x1,
                                              end_x2=end_x2,
                                              spacing1=spacing1,
                                              spacing2=spacing2)
        x = numpy.zeros(len(y))
        return x, y, z
    def generate_rectangle_coords(self, start_x1, start_x2, end_x1, end_x2,
                                  spacing1, spacing2):
        """
        Generates a rectangle from the given start and end points, with the
        given spacing.

        Raises ValueError for non-positive extents or spacings.
        """
        width = end_x1-start_x1
        height = end_x2-start_x2
        if width <= 0.0 or height <= 0.0 or spacing1 <= 0.0 or spacing2 <= 0:
            msg = 'Incorrect values :\n'
            # BUGFIX: this used '=' instead of '+=', silently dropping the
            # 'Incorrect values' prefix from the error message.
            msg += 'width=%f, height=%f, spacing1=%f, spacing1=%f'%(
                width, height, spacing1, spacing2)
            # use call syntax so the raise is valid in Python 2 and 3
            raise ValueError(msg)
        new_spacing1, n1 = find_best_particle_spacing(length=width,
                                                      initial_spacing=spacing1,
                                                      end_points_exact=\
                                                          self.end_points_exact,
                                                      tolerance=self.tolerance)
        new_spacing2, n2 = find_best_particle_spacing(length=height,
                                                      initial_spacing=spacing2,
                                                      end_points_exact=\
                                                          self.end_points_exact,
                                                      tolerance=self.tolerance)
        # total point count: either the full grid or just the perimeter
        if self.filled == False:
            n2 -= 2
            n = 2*n1 + 2*n2
        else:
            n = n1*n2
        x1 = numpy.zeros(n, dtype=float)
        x2 = numpy.zeros(n, dtype=float)
        if self.filled is True:
            pindx = 0
            for i in range(n1):
                for j in range(n2):
                    x1[pindx] = start_x1 + i*new_spacing1
                    x2[pindx] = start_x2 + j*new_spacing2
                    pindx += 1
        else:
            pindx = 0
            # generate the bottom horizontal lines
            for i in range(n1):
                x1[pindx] = start_x1 + i*new_spacing1
                x2[pindx] = start_x2
                pindx += 1
            end_x1 = x1[pindx-1]
            # now generate the left vertical line
            for i in range(n2):
                x1[pindx] = start_x1
                x2[pindx] = start_x2 + (i+1)*new_spacing2
                pindx += 1
            # NOTE(review): the top edge is placed one spacing beyond the
            # last interior point -- confirm this matches the intended
            # rectangle height.
            end_x2 = x2[pindx-1] + new_spacing2
            # the top
            for i in range(n1):
                x1[pindx] = start_x1 + i*new_spacing1
                x2[pindx] = end_x2
                pindx += 1
            # the right side
            for i in range(n2):
                x1[pindx] = end_x1
                x2[pindx] = start_x2 + (i+1)*new_spacing2
                pindx += 1
        return x1, x2
    def generate_func(self):
        """
        Generate a complete particle array with the required properties
        computed.
        """
        # setup the output particle array as required.
        self._setup_outputs()
        # find the coordinates
        x, y, z = self.get_coords()
        # add the generated particles to the output particle array
        output = self.output_particle_arrays[0]
        output.add_particles(x=x, y=y, z=z)
        # check if 'h' has to be set.
        if self.particle_h > 0.:
            output.h[:] = self.particle_h
        # check if density has to be set.
        if self.density_computation_mode == DCM.Set_Constant:
            output.rho[:] = self.particle_density
        # check if mass has to be set.
        if self.mass_computation_mode == MCM.Set_Constant:
            output.m[:] = self.particle_mass
        elif self.mass_computation_mode == MCM.Compute_From_Density:
            m = compute_particle_mass(density=self.particle_density,
                                      h=self.particle_h,
                                      parray=output,
                                      kernel=self.kernel,
                                      dim=3)
            output.m[:] = m
###############################################################################
# `CuboidGenerator` class.
###############################################################################
class CuboidGenerator(ParticleGenerator):
    """
    Class to generate cuboids of particles (filled and hollow).
    """
    def __init__(self,
                 output_particle_arrays=[],
                 particle_mass=-1.0,
                 mass_computation_mode=MCM.Compute_From_Density,
                 particle_density=1000.0,
                 density_computation_mode=DCM.Set_Constant,
                 particle_h=0.1,
                 kernel=None,
                 filled=True,
                 exclude_top=False,
                 start_point=Point(0, 0, 0),
                 end_point=Point(1, 1, 1),
                 particle_spacing_x=0.1,
                 particle_spacing_y=0.1,
                 particle_spacing_z=0.1,
                 end_points_exact=True,
                 tolerance=1e-09,
                 *args,
                 **kwargs):
        """
        Constructor.
        """
        # BUGFIX: initialize the common generator state.  This call was
        # previously missing, so attributes used later by generate_func
        # (particle_h, mass_computation_mode, particle_density, kernel,
        # density_computation_mode, ...) were never set on instances.
        # TODO(review): output_particle_arrays is accepted but not
        # forwarded -- confirm whether the base class should receive it.
        ParticleGenerator.__init__(
            self,
            particle_mass=particle_mass,
            mass_computation_mode=mass_computation_mode,
            particle_density=particle_density,
            density_computation_mode=density_computation_mode,
            particle_h=particle_h,
            kernel=kernel)
        self.filled = filled
        self.exclude_top = exclude_top
        # copy the end points so the caller's Points are not aliased
        self.start_point = Point(start_point.x,
                                 start_point.y,
                                 start_point.z)
        self.end_point = Point(end_point.x,
                               end_point.y,
                               end_point.z)
        self.particle_spacing_x = particle_spacing_x
        self.particle_spacing_y = particle_spacing_y
        self.particle_spacing_z = particle_spacing_z
        self.end_points_exact = end_points_exact
        self.tolerance = tolerance
    def num_output_arrays(self):
        """
        Number of particle arrays generated by this generator (always 1).
        """
        return 1
    def validate_setup(self):
        """
        Make sure the input is valid.
        """
        if ParticleGenerator.validate_setup(self) == False:
            return False
        return self._validate_input_points()
    def _validate_input_points(self):
        """
        Make sure the end points input are proper.

        Each requested spacing must be positive and no larger than the
        cuboid's extent along that axis.
        """
        length = self.end_point.x - self.start_point.x
        depth = self.end_point.y - self.start_point.y
        width = self.end_point.z - self.start_point.z
        if (self.particle_spacing_x < 0.0 or
            self.particle_spacing_y < 0.0 or
            self.particle_spacing_z < 0.0 or
            abs(length)-self.particle_spacing_x < 0. or
            abs(depth)-self.particle_spacing_y < 0. or
            abs(width)-self.particle_spacing_z < 0.):
            msg = 'Incorrect input paramters specified'
            logger.error(msg)
            return False
        return True
    def get_coords(self):
        """
        Returns 3 numpy arrays representing the coordinates of the generated points.
        """
        start_point, end_point, length, depth, width = self._get_end_points()
        # FIX: forward the stored end_points_exact/tolerance settings;
        # they used to be ignored here (harmless only for the defaults).
        particle_spacing_x, nx = find_best_particle_spacing(
            length=length,
            initial_spacing=self.particle_spacing_x,
            end_points_exact=self.end_points_exact,
            tolerance=self.tolerance)
        particle_spacing_y, ny = find_best_particle_spacing(
            length=depth,
            initial_spacing=self.particle_spacing_y,
            end_points_exact=self.end_points_exact,
            tolerance=self.tolerance)
        particle_spacing_z, nz = find_best_particle_spacing(
            length=width,
            initial_spacing=self.particle_spacing_z,
            end_points_exact=self.end_points_exact,
            tolerance=self.tolerance)
        logger.info('x-spacing : %f, nx : %d'\
                        %(particle_spacing_x, nx))
        logger.info('y-spacing : %f, ny : %d'\
                        %(particle_spacing_y, ny))
        logger.info('z-spacing : %f, nz : %d'\
                        %(particle_spacing_z, nz))
        if self.filled == True:
            return self._generate_filled_cuboid(start_point, end_point,
                                                nx, ny, nz,
                                                particle_spacing_x,
                                                particle_spacing_y,
                                                particle_spacing_z)
        else:
            return self._generate_empty_cuboid(start_point, end_point,
                                               nx, ny, nz,
                                               particle_spacing_x,
                                               particle_spacing_y,
                                               particle_spacing_z)
    def generate_func(self):
        """
        Generate the complete particle array with the required properties computed.
        """
        self._setup_outputs()
        # compute the coords.
        x, y, z = self.get_coords()
        # add the generated particles to the output particle array
        output = self.output_particle_arrays[0]
        output.add_particles(x=x, y=y, z=z)
        # check if 'h' has to be set
        if self.particle_h > 0.:
            output.h[:] = self.particle_h
        # check if the density is to be set.
        if self.density_computation_mode == DCM.Set_Constant:
            output.rho[:] = self.particle_density
        # check if the mass has to be computed.
        if self.mass_computation_mode == MCM.Set_Constant:
            output.m[:] = self.particle_mass
        elif self.mass_computation_mode == MCM.Compute_From_Density:
            m = compute_particle_mass(density=self.particle_density,
                                      h=self.particle_h,
                                      parray=output,
                                      kernel=self.kernel,
                                      dim=3)
            output.m[:] = m
    def _get_end_points(self):
        """
        Return changed end points so that start_point to end_point is moving in
        positive direction for all coords.
        """
        length = self.end_point.x - self.start_point.x
        depth = self.end_point.y - self.start_point.y
        width = self.end_point.z - self.start_point.z
        start_point = Point()
        end_point = Point()
        # swap each coordinate pair if needed so start <= end
        if length < 0:
            start_point.x = self.end_point.x
            end_point.x = self.start_point.x
        else:
            start_point.x = self.start_point.x
            end_point.x = self.end_point.x
        if depth < 0:
            start_point.y = self.end_point.y
            end_point.y = self.start_point.y
        else:
            start_point.y = self.start_point.y
            end_point.y = self.end_point.y
        if width < 0:
            start_point.z = self.end_point.z
            end_point.z = self.start_point.z
        else:
            start_point.z = self.start_point.z
            end_point.z = self.end_point.z
        return start_point, end_point, abs(length), abs(depth), abs(width)
    def _generate_empty_cuboid(self, start_point, end_point, nx, ny, nz,
                               particle_spacing_x, particle_spacing_y,
                               particle_spacing_z):
        """
        Generate points only on the six faces of the cuboid (the y-max
        face is skipped when exclude_top is True).
        """
        logger.info('Input num pts : %d %d %d'%(nx, ny, nz))
        # count the total number of points, adjusting nx/ny/nz so the
        # edges shared by two faces are not generated twice
        if self.exclude_top is True:
            ny -= 1
        n = 0
        n += 2*nx*ny # for the z-max and z-min planes
        nz -= 2
        if self.exclude_top is False:
            n += 2*nx*nz # for the y-max and y-min planes
            ny -= 2
        else:
            n += nx*nz
            ny -= 1
        n += 2*ny*nz # for the x-max and x-min planes
        # restore the counts used for the generation loops below
        if self.exclude_top is False:
            ny += 2
        else:
            ny += 1
        nz += 2
        x = numpy.zeros(n, dtype=float)
        y = numpy.zeros(n, dtype=float)
        z = numpy.zeros(n, dtype=float)
        pindx = 0
        logger.info('Now using num pts : %d %d %d'%(nx, ny, nz))
        logger.info('Computed number of points : %d'%(n))
        # generate the z-min and max planes
        for i in range(nx):
            for j in range(ny):
                x[pindx] = start_point.x + i*particle_spacing_x
                y[pindx] = start_point.y + j*particle_spacing_y
                z[pindx] = start_point.z
                pindx += 1
        for i in range(nx):
            for j in range(ny):
                x[pindx] = start_point.x + i*particle_spacing_x
                y[pindx] = start_point.y + j*particle_spacing_y
                z[pindx] = end_point.z
                pindx += 1
        # generate the bottom and top planes
        for i in range(nx):
            for k in range(nz-2):
                x[pindx] = start_point.x + i*particle_spacing_x
                y[pindx] = start_point.y
                z[pindx] = start_point.z + (k+1)*particle_spacing_z
                pindx += 1
        if self.exclude_top is False:
            for i in range(nx):
                for k in range(nz-2):
                    x[pindx] = start_point.x + i*particle_spacing_x
                    y[pindx] = end_point.y
                    z[pindx] = start_point.z + (k+1)*particle_spacing_z
                    pindx += 1
        # generate the left and right planes
        if self.exclude_top is True:
            ny += 1
        for j in range(ny-2):
            for k in range(nz-2):
                x[pindx] = start_point.x
                y[pindx] = start_point.y + (j+1)*particle_spacing_y
                z[pindx] = start_point.z + (k+1)*particle_spacing_z
                pindx += 1
        for j in range(ny-2):
            for k in range(nz-2):
                x[pindx] = end_point.x
                y[pindx] = start_point.y + (j+1)*particle_spacing_y
                z[pindx] = start_point.z + (k+1)*particle_spacing_z
                pindx += 1
        logger.info('Last pindx value : %d'%(pindx))
        return x, y, z
    def _generate_filled_cuboid(self, start_point, end_point, nx, ny, nz,
                                particle_spacing_x, particle_spacing_y,
                                particle_spacing_z):
        """
        Generate a full nx*ny*nz lattice of points inside the cuboid.
        """
        n = nx*ny*nz
        x = numpy.zeros(n, dtype=float)
        y = numpy.zeros(n, dtype=float)
        z = numpy.zeros(n, dtype=float)
        pindx = 0
        for i in range(nx):
            for j in range(ny):
                for k in range(nz):
                    x[pindx] = start_point.x + i*particle_spacing_x
                    y[pindx] = start_point.y + j*particle_spacing_y
                    z[pindx] = start_point.z + k*particle_spacing_z
                    pindx += 1
        return x, y, z
| Python |
import threading
import os
import socket
from SimpleXMLRPCServer import SimpleXMLRPCServer, SimpleXMLRPCRequestHandler
from SimpleHTTPServer import SimpleHTTPRequestHandler
from multiprocessing.managers import BaseManager, BaseProxy
class MultiprocessingInterface(BaseManager):
    """ A multiprocessing interface to the solver controller

    This object exports a controller instance proxy over the multiprocessing
    interface. Control actions can be performed by connecting to the interface
    and calling methods on the controller proxy instance """
    def __init__(self, address=None, authkey=None, try_next_port=False):
        # address/authkey are forwarded to BaseManager; try_next_port
        # enables scanning successive ports when the requested one is busy
        BaseManager.__init__(self, address, authkey)
        self.authkey = authkey
        self.try_next_port = try_next_port
    def get_controller(self):
        # exposed via register() so clients can obtain a controller proxy
        return self.controller
    def start(self, controller):
        """Serve the given controller over this interface (blocking)."""
        self.controller = controller
        self.register('get_controller', self.get_controller)
        if not self.try_next_port:
            # serve on the configured address; this call blocks
            self.get_server().serve_forever()
        host, port = self.address
        while self.try_next_port:
            try:
                # re-initialize the manager on the candidate port and serve
                BaseManager.__init__(self, (host,port), self.authkey)
                self.get_server().serve_forever()
                self.try_next_port = False
            except socket.error as e:
                # NOTE(review): this rebinds a local name, not
                # self.try_next_port, so it does not affect the loop
                # condition -- confirm the intent.
                try_next_port = False
                import errno
                if e.errno == errno.EADDRINUSE:
                    # port already taken: retry with the next one
                    port += 1
                else:
                    raise
class MultiprocessingClient(BaseManager):
    """ A client for the multiprocessing interface

    Override the run() method to do appropriate actions on the proxy
    instance of the controller object or add an interface using the add_interface
    methods similar to the Controller.add_interface method """
    def __init__(self, address=None, authkey=None, serializer='pickle', start=True):
        # serializer is forwarded to BaseManager; when start is True the
        # client connects to the server immediately
        BaseManager.__init__(self, address, authkey, serializer)
        if start:
            self.start()
    def start(self, connect=True):
        """Connect to the server and obtain the controller proxy."""
        self.interfaces = []
        # to work around a python caching bug
        # http://stackoverflow.com/questions/3649458/broken-pipe-when-using-python-multiprocessing-managers-basemanager-syncmanager
        if self.address in BaseProxy._address_to_local:
            del BaseProxy._address_to_local[self.address][0].connection
        self.register('get_controller')
        if connect:
            self.connect()
            self.controller = self.get_controller()
            self.run(self.controller)
    @staticmethod
    def is_available(address):
        # probe the address with a 1-second connection timeout
        try:
            socket.create_connection(address, 1).close()
            return True
        except socket.error:
            return False
    def run(self, controller):
        # hook for subclasses; the default does nothing
        pass
    def add_interface(self, callable):
        """ This makes it act as substitute for the actual command_manager """
        # each interface runs in its own daemon thread and receives the
        # controller proxy as its only argument
        thr = threading.Thread(target=callable, args=(self.controller,))
        thr.daemon = True
        thr.start()
        return thr
class CrossDomainXMLRPCRequestHandler(SimpleXMLRPCRequestHandler,
                                      SimpleHTTPRequestHandler):
    """ SimpleXMLRPCRequestHandler subclass which attempts to do CORS

    CORS is Cross-Origin-Resource-Sharing (http://www.w3.org/TR/cors/)
    which enables xml-rpc calls from a different domain than the xml-rpc server
    (such requests are otherwise denied)
    """
    def do_OPTIONS(self):
        """ Implement the CORS pre-flighted access for resources """
        self.send_response(200)
        # allow any origin and the methods the controller uses
        self.send_header("Access-Control-Allow-Origin", "*")
        self.send_header("Access-Control-Allow-METHODS", "POST,GET,OPTIONS")
        #self.send_header("Access-Control-Max-Age", "60")
        self.send_header("Content-length", "0")
        self.end_headers()
    def do_GET(self):
        """ Handle http requests to serve html/image files only """
        print self.path, self.translate_path(self.path)
        # only whitelisted static file types are served
        permitted_extensions = ['.html','.png','.svg','.jpg', '.js']
        if not os.path.splitext(self.path)[1] in permitted_extensions:
            self.send_error(404, 'File Not Found/Allowed')
        else:
            SimpleHTTPRequestHandler.do_GET(self)
    def end_headers(self):
        """ End response header with adding Access-Control-Allow-Origin

        This is done to enable CORS request from all clients """
        self.send_header("Access-Control-Allow-Origin", "*")
        SimpleXMLRPCRequestHandler.end_headers(self)
class XMLRPCInterface(SimpleXMLRPCServer):
    """An XML-RPC interface to the solver controller.

    Only values that can be marshalled over XML-RPC are supported, which
    excludes most custom classes (notably ParticleArray and numpy arrays).
    """
    def __init__(self, addr, requestHandler=CrossDomainXMLRPCRequestHandler,
                 logRequests=True, allow_none=True,
                 encoding=None, bind_and_activate=True):
        """Initialize the underlying server with a CORS-capable handler."""
        SimpleXMLRPCServer.__init__(self, addr, requestHandler, logRequests,
                                    allow_none, encoding, bind_and_activate)
    def start(self, controller):
        """Expose the controller's methods over XML-RPC and serve forever."""
        self.register_introspection_functions()
        self.register_instance(controller, allow_dotted_names=False)
        self.serve_forever()
class CommandlineInterface(object):
    """ command-line interface to the solver controller """
    def start(self, controller):
        """Run a blocking read-eval loop against the controller."""
        while True:
            try:
                # prompt shows the current iteration count
                inp = raw_input('pysph[%d]>>> '%controller.get('count'))
                cmd = inp.strip().split()
                try:
                    # first token is the command, the rest are arguments
                    cmd, args = cmd[0], cmd[1:]
                except Exception as e:
                    print 'Invalid command'
                    self.help()
                    continue
                # evaluate each argument so numbers/lists become Python
                # objects; tokens that fail to eval are kept as strings.
                # NOTE(review): eval of interactive input is a deliberate
                # convenience here, but is unsafe on untrusted input.
                args2 = []
                for arg in args:
                    try:
                        arg = eval(arg)
                    except:
                        pass
                    finally:
                        args2.append(arg)
                if cmd=='p' or cmd=='pause':
                    controller.pause_on_next()
                elif cmd=='c' or cmd=='cont':
                    controller.cont()
                elif cmd=='g' or cmd=='get':
                    print controller.get(args[0])
                elif cmd=='s' or cmd=='set':
                    print controller.set(args[0], args2[1])
                elif cmd=='q' or cmd=='quit':
                    break
                else:
                    # fall back to calling an arbitrary controller method
                    print getattr(controller, cmd)(*args2)
            except Exception as e:
                # any failure prints the help and the error, then re-prompts
                self.help()
                print e
    def help(self):
        # print the usage summary for the interactive commands
        print '''Valid commands are:
    p | pause
    c | cont
    g | get <name>
    s | set <name> <value>
    q | quit -- quit commandline interface (solver keeps running)'''
| Python |
""" A wrapper around the sph calc and function to make it simpler """
from pysph.sph.sph_calc import SPHCalc, CLCalc
from pysph.sph.sph_func import SPHFunctionParticle
from pysph.base.particle_types import ParticleType
# Convenience aliases for the particle-type tags used when declaring
# SPH operations.
Fluid = ParticleType.Fluid
Solid = ParticleType.Solid
class SPHOperation(object):
""" This class that represents a general SPH operation
An operation is defined as any equation appearing in the system to
be solved. This may be an assignment of a certain property or the
evaluation of a certain forcing function.
Examples of operations are:
.. math::
\rho = \sum_{j=1}^{N} m_j\,W_{ij}
p = (\gamma - 1.0)\rho e
Each equation necessarily defines a *destination* and posiibly a
*source* particle on which it operates. This is usually problem
specific and the SPHCalc objects which compute the necessary
interactions is constructed from this information given.
Data Members:
--------------
function -- The function (:class:`sph_func.Function`) to use for evaluating
the RHS in an SPH operation. One function is created for
each source-destination pair.
from_types -- The influencing type of particles. All particle arrays
matching these types are sources.
on_types -- The influenced type of particles. All particle arrays matching
these types are destinations.
updates -- A list of strings indicating the destination particle
properties updated by the resulting SPHCalc object.
id -- A unique id for this operation
intgrates -- Bool indicating if the RHS evaluated by the Operation
is an acceleration.
Member Functions:
------------------
get_calc -- Return an appropriate calc for the kind of operation requested
"""
def __init__(self, function, on_types, id, updates=None, kernel=None,
             from_types=None, kernel_correction=-1, integrates=False):
    """ Constructor

    Parameters:
    -----------
    function -- The SPHFunction for this operation.
    on_types -- valid destination (influenced) particle array types.
    from_types -- valid source (influencing) particle array types
    updates -- destination properties updated by this operation
    kernel -- the kernel to use for this operation.
    kernel_correction -- Type of kernel correction to use. (-1) means
    no correction.
    integrates -- Flag indicating if the function evaluates an
    acceleration.
    """
    # BUGFIX: the defaults for `updates` and `from_types` were mutable
    # lists shared by every SPHOperation instance; use None sentinels
    # and create a fresh list per instance instead.
    self.from_types = [] if from_types is None else from_types
    self.on_types = on_types
    self.function = function
    self.updates = [] if updates is None else updates
    self.id = id
    self.integrates = integrates
    self.kernel = kernel
    # neighbor information is only needed for true particle-particle
    # interaction functions (subclasses of SPHFunctionParticle)
    if not issubclass(function.get_func_class(), SPHFunctionParticle):
        self.nbr_info = False
    else:
        self.nbr_info = True
    self.has_kernel_correction = False
    self.kernel_correction = kernel_correction
    if kernel_correction != -1:
        self.has_kernel_correction = True
    # default calc; OpenCL setups replace this with CLCalc
    self.calc_type = SPHCalc
def get_calc_data(self, particles):
    """ Group particle arrays as src-dst pairs with appropriate
    functions as required by SPHCalc

    Return a dictionary of calc properties keyed on particle array
    id in particles which corresponds to the destination particle
    array for that calc.

    The calc properties are:

    sources -- a list of particle arrays considered as sources for this
               calc
    funcs -- a list of num_sources functions of the same type to operate
             between destination-source pair.
    dnum -- the destination particle array id in particles for this calc.
    id -- a unique identifier for the calc
    """
    if issubclass(self.function.get_func_class(), SPHFunctionParticle):
        # all nbr requiring funcs are subclasses of SPHFunctionParticle
        self.nbr_info = True
    else:
        self.nbr_info = False
        # A non-pairwise function must not be given explicit sources.
        if self.from_types != []:
            raise ValueError, 'Only Subclasses of SPHFunctionParticle need neighbors, %s should not provide `from_types` to operation'%(self.function.get_func_class())

    arrays = particles.arrays
    narrays = len(arrays)

    calc_data = {}
    for i in range(narrays):
        dst = arrays[i]
        # create an entry in the dict if this is a valid destination array
        # (matched by particle type, by identity, or by name)
        if dst.particle_type in self.on_types or dst in self.on_types or dst.name in self.on_types:
            calc_data[i] = {'sources':[], 'funcs':[], 'dnum':i, 'id':"",
                            'snum':str(i)}

            # if from_types == [], no neighbor info is required!
            # The function then operates on the destination alone.
            if self.from_types == []:
                func = self.function.get_func(dst, dst)
                func.id = self.id
                calc_data[i]['funcs'] = [func]
                calc_data[i]['id'] = self.id + '_' + dst.name
            else:
                # check for the sources for this destination
                for j in range(narrays):
                    src = arrays[j]
                    # check if this is a valid source array
                    if src.particle_type in self.from_types or src in self.from_types or src.name in self.from_types:
                        # get the function with the src dst pair
                        func = self.function.get_func(source=src, dest=dst)
                        func.id = self.id

                        # make an entry in the dict for this destination;
                        # 'snum' accumulates the source indices as a string
                        calc_data[i]['sources'].append(src)
                        calc_data[i]['funcs'].append(func)
                        calc_data[i]['snum'] = calc_data[i]['snum']+str(j)
                        calc_data[i]['id'] = self.id + '_' + dst.name

                # a destination with explicit from_types but no matching
                # source array is considered a setup error
                if calc_data[i]['sources'] == []:
                    msg = "No source found for %s operation"%(self.id)
                    raise RuntimeWarning, msg

    return calc_data
def get_calcs(self, particles, kernel):
    """ Return a list of calcs for the operation.

    An SPHCalc is created for each destination particle array for
    the operation. The calc may have a list of sources with one
    function for each src-dst pair.

    Parameters:
    ------------
    particles -- the collection of particle arrays to consider
    kernel -- the smoothing kernel to use for the operation
    """
    calcs = []
    calc_data = self.get_calc_data(particles)

    arrays = particles.arrays
    narrays = len(arrays)

    for i in range(narrays):
        # only arrays that qualified as destinations have an entry
        if calc_data.has_key(i):
            dest = arrays[i]

            srcs = calc_data[i]['sources']
            funcs = calc_data[i]['funcs']
            dnum = calc_data[i]['dnum']
            snum = calc_data[i]['snum']
            id = calc_data[i]['id']

            # self.calc_type is SPHCalc by default (set in __init__)
            calc = self.calc_type(
                particles=particles, sources=srcs, dest=dest,
                funcs=funcs, kernel=kernel, updates=self.updates,
                integrates=self.integrates, dnum=dnum, id=id,
                dim=kernel.dim, snum=snum,
                kernel_correction=self.kernel_correction,
                nbr_info=self.nbr_info,
                )

            calcs.append(calc)

    return calcs
class SPHIntegration(SPHOperation):
    """ Return an integrating calc (via get_calc()) for an SPH equation of form

    \frac{D prop}{Dt} = \vec{F}

    where `prop` is the property being updated and `F` is the forcing
    term which may be a scalar or a vector.

    Example:
    --------

    .. math::

    \frac{D\vec{v}}{Dt} = \vec{g}

    \frac{Dv}{Dt} = \sum_{j=1}^{N} m_j \frac{v_j}{\rho_j}\nablaW_{ij}

    Note:
    -----
    This class is just a convenience (and an alias) over creating an integrating
    :class:`SPHOperation` with integrates=True
    """
    def __init__(self, function, on_types, id, updates=None, kernel=None,
                 from_types=None, kernel_correction=-1):
        # Normalize the previously mutable default arguments ([]) to fresh
        # lists so a single module-level list is never shared across
        # operations, then delegate with integrates=True.
        SPHOperation.__init__(self, function, on_types, id,
                              [] if updates is None else updates,
                              kernel,
                              from_types=[] if from_types is None else from_types,
                              integrates=True,
                              kernel_correction=kernel_correction,)
| Python |
HAS_CL = True
try:
import pyopencl as cl
except ImportError:
HAS_CL=False
from os import path
import numpy
from utils import get_pysph_root
# Return all available devices on the host
def get_cl_devices():
    """ Return a dictionary keyed on device type for all devices """
    devices_by_type = {'CPU': [], 'GPU': []}

    # Walk every platform and bucket its devices by type.
    for platform in cl.get_platforms():
        for dev in platform.get_devices():
            if dev.type == cl.device_type.CPU:
                devices_by_type['CPU'].append(dev)
            elif dev.type == cl.device_type.GPU:
                devices_by_type['GPU'].append(dev)

    return devices_by_type
def create_context_from_cpu():
    """ Create an OpenCL context using the CPU as the default device """
    cpu_devices = get_cl_devices()['CPU']
    if not cpu_devices:
        raise ValueError("No CPU device found! ")
    return cl.Context(devices=cpu_devices)
def create_context_from_gpu():
    """ Create an OpenCL context using the GPU as the default device

    Raises ValueError when no GPU device is available on the host.
    """
    # Docstring previously (incorrectly) said "CPU"; this is the GPU variant.
    cl_devices = get_cl_devices()
    if ( cl_devices['GPU'] == [] ):
        raise ValueError("No GPU device found! ")
    return cl.Context( devices=cl_devices['GPU'] )
def create_some_context():
    """ Create a "reasonable" context from the available devices.

    Preference is given to CPU devices over GPU devices.
    """
    available = get_cl_devices()

    # CPU first, then GPU -- first non-empty bucket wins.
    for device_type in ('CPU', 'GPU'):
        if available[device_type]:
            return cl.Context(devices=available[device_type])

    raise ValueError("No devices found!")
def iscpucontext(ctx):
    """Return True or False if the context is for a CPU device"""
    # The original fell off the end (returning None) when no CPU device
    # was present; any() makes the False branch explicit as documented.
    return any(device.type == cl.device_type.CPU for device in ctx.devices)
def isgpucontext(ctx):
    """Return True or False if the context is for a GPU device"""
    # Mirrors iscpucontext; explicit False instead of an implicit None.
    return any(device.type == cl.device_type.GPU for device in ctx.devices)
def get_cl_include():
    """ Include directories for OpenCL definitions """
    PYSPH_ROOT = get_pysph_root()

    base_inc = "-I" + path.join(PYSPH_ROOT, "base")
    solver_inc = "-I" + path.join(PYSPH_ROOT, "solver")

    # pyopencl 2011.1beta3 expects a single option string; every later
    # version takes a list of options.
    if cl.version.VERSION_TEXT == "2011.1beta3":
        return base_inc + " " + solver_inc

    return [base_inc, solver_inc]
def get_scalar_buffer(val, dtype, ctx):
    """ Return a cl.Buffer object that can be passed as a scalar to kernels """
    flags = cl.mem_flags
    host_array = numpy.array([val], dtype)
    return cl.Buffer(ctx, flags.READ_ONLY | flags.COPY_HOST_PTR,
                     hostbuf=host_array)
def cl_read(filename, precision='double', function_name=None):
    """Read an OpenCL source file.

    The function also adds a few convenient #define's so as to allow us
    to write common code for both float and double precision. This is
    done by specifying the `precision` argument which defaults to
    'double'. The OpenCL code itself should be written to use REAL for
    the type declaration. The word REAL will be #defined to change
    precision on the fly. For convenience REAL2, REAL3, REAL4 and REAL8
    are all defined as well.

    Parameters
    ----------
    filename : str
        Name of file to open.
    precision : {'single', 'double'}, optional
        The floating point precision to use.
    function_name: str, optional
        An optional function name to indicate a block to extract from
        the OpenCL template file.

    Raises
    ------
    ValueError
        If `precision` is neither 'single' nor 'double'.
    """
    if precision not in ['single', 'double']:
        msg = "Invalid argument for 'precision' should be 'single'"\
              " or 'double'."
        raise ValueError(msg)

    # Close the file handle deterministically rather than relying on GC.
    with open(filename) as source_file:
        src = source_file.read()

    if function_name:
        # Template blocks are delimited by '$<function_name>' markers.
        src = src.split('$'+function_name)[1]

    if precision == 'single':
        typ = 'float'
        hdr = "#define F f \n"
    else:
        typ = 'double'
        hdr = "#pragma OPENCL EXTENSION cl_khr_fp64 : enable\n"
        hdr += '#define F \n'

    # REAL, REAL2, REAL3, REAL4, REAL8 -> float/double variants.
    for x in ('', '2', '3', '4', '8'):
        hdr += '#define REAL%s %%(typ)s%s\n'%(x, x)

    hdr = hdr%(dict(typ=typ))
    return hdr + src
def get_real(val, precision):
    """ Return a suitable floating point number for OpenCL.

    Parameters
    ----------
    val : float
        The value to convert.
    precision : {'single', 'double'}
        The precision to use.
    """
    converters = {"single": numpy.float32, "double": numpy.float64}
    try:
        return converters[precision](val)
    except KeyError:
        raise ValueError ("precision %s not supported!"%(precision))
def create_program(template, func, loc=None):
    """ Create an OpenCL program given a template string and function

    Parameters
    ----------
    template: str
        The template source file that is read using cl_read
    func: SPHFunctionParticle
        The function that provides the kernel arguments to the template
    loc: NotImplemented

    A template is the basic outline of an OpenCL kernel for a
    SPHFunctionParticle. The arguments to the kernel and neighbor
    looping code needs to be provided to render it a valid OpenCL
    kernel.
    """
    # NOTE: the local variable names below (kernel_args, workgroup_code,
    # neighbor_loop_code) are significant -- the template is rendered via
    # `template % locals()` at the end, so renaming any of them would
    # silently break the substitution.
    k_args = []
    func.set_cl_kernel_args()
    k_args.extend(func.cl_args_name)

    # Build the kernel args string.
    kernel_args = ',\n '.join(k_args)

    # Get the kernel workgroup code
    workgroup_code = func.get_cl_workgroup_code()

    # Construct the neighbor loop code.
    neighbor_loop_code = "for (int src_id=0; src_id<nbrs; ++src_id)"

    return template%(locals())
def enqueue_copy(queue, src, dst):
    """Copy data between host arrays and device buffers.

    Papers over pyopencl API differences: 2011.1beta3 used separate
    per-direction calls, later versions expose a single cl.enqueue_copy.
    The direction is inferred from the types of `src` and `dst`.
    """
    if cl.version.VERSION_TEXT == "2011.1beta3":
        if ( isinstance(dst, cl.Buffer) ):
            if ( isinstance(src, cl.Buffer) ):
                # device to device copy
                cl.enqueue_copy_buffer(queue, src=src, dst=dst)
            elif ( isinstance(src, numpy.ndarray) ):
                # host to device copy
                cl.enqueue_write_buffer(queue, mem=dst, hostbuf=src)
        elif ( isinstance(src, cl.Buffer) ):
            # device to host copy (dst presumably a numpy array -- the
            # old API takes it as hostbuf)
            cl.enqueue_read_buffer(queue, mem=src, hostbuf=dst)
    elif cl.version.VERSION_TEXT == "2011.1.1":
        cl.enqueue_copy(queue, dest=dst, src=src).wait()
    else: # we assume that it is the latest version
        cl.enqueue_copy(queue, dest=dst, src=src).wait()

    # Block until the copy has completed.
    queue.finish()
def round_up(n):
    """Round up 'n' to the nearest power of 2

    The code here is borrowed from AMD APP SDK 2.5:
    SDKCommon.cpp::roundToPowerOf2
    """
    result = n - 1
    # Smear the highest set bit into every lower position (covers
    # integers up to 256 bits wide), then add one.
    for shift in (1, 2, 4, 8, 16, 32, 64, 128):
        result |= result >> shift
    return result + 1
def ispowerof2(val):
    """Test if the input is a power of 2"""
    # The original expression `(val & (-val))-val == 0 & (val != 0)` is
    # broken: bitwise '&' binds tighter than '==', so the right-hand side
    # collapses to `... == 0` and the zero check is lost -- ispowerof2(0)
    # wrongly returned True. A power of two has exactly one bit set, so
    # val & (val - 1) clears it to zero.
    if val != 0 and (val & (val - 1)) == 0:
        return True
    else:
        return False
def uint32mask():
    """Reserved value for 32 bit unsigned ints"""
    # All 32 bits set: 2**32 - 1.
    return 0xFFFFFFFF
| Python |
from numpy import arccos, sin, cos, array, sqrt, pi
r = 2.0/pi   # default starting x-coordinate for the integrators (x0=r, y0=0)
dt = 1e-3    # time step shared by euler/rk2/rk4
def force(x, y):
    """Return the unit tangential direction at (x, y).

    The angle is recovered from the x-coordinate via arccos; the
    returned array is (-sin(theta), cos(theta)).
    """
    angle = arccos(x/sqrt((x**2+y**2)))
    return array([-sin(angle), cos(angle)])
def rk2(nsteps=1000, x0=r, y0=0):
    """Second-order Runge-Kutta integration of the force field.

    Evaluates the slope at the start point and at the half-step point,
    then advances with their average times dt/2 per the original scheme.
    """
    count = 0
    x, y = x0, y0
    while count < nsteps:
        fx1, fy1 = force(x, y)
        # slope at the midpoint predicted by the first slope
        fx2, fy2 = force(x + 0.5*dt*fx1, y + 0.5*dt*fy1)
        xnew = x + (0.5*dt)*(fx1 + fx2)
        ynew = y + (0.5*dt)*(fy1 + fy2)
        x, y = xnew, ynew
        count += 1
    return xnew, ynew
def rk4(steps=1000, x0=r, y0=0):
    """Classic fourth-order Runge-Kutta integration of the force field."""
    count = 0
    x, y = x0, y0
    while count < steps:
        # the four RK4 stage slopes
        fx1, fy1 = force(x, y)
        fx2, fy2 = force(x + 0.5*dt*fx1, y + 0.5*dt*fy1)
        fx3, fy3 = force(x + 0.5*dt*fx2, y + 0.5*dt*fy2)
        fx4, fy4 = force(x + dt*fx3, y + dt*fy3)
        # weighted combination
        xnew = x + (dt/6.0)*(fx1 + 2*fx2 + 2*fx3 + fx4)
        ynew = y + (dt/6.0)*(fy1 + 2*fy2 + 2*fy3 + fy4)
        x, y = xnew, ynew
        count += 1
    return xnew, ynew
def euler(nsteps=1000, x0=r, y0=0):
    """Forward-Euler integration of the force field."""
    count = 0
    x, y = x0, y0
    while count < nsteps:
        # dt*force(...) scales the whole slope array before unpacking
        stepx, stepy = dt*force(x, y)
        xnew = x + stepx
        ynew = y + stepy
        x, y = xnew, ynew
        count += 1
    return xnew, ynew
| Python |
""" An example solver for the circular patch of fluid """
import numpy
import pysph.base.api as base
import pysph.sph.api as sph
from solver import Solver
from sph_equation import SPHOperation, SPHIntegration
Fluids = base.ParticleType.Fluid
Solids = base.ParticleType.Solid
def get_circular_patch(name="", type=0, dx=0.025/1.3,
                       cl_precision="single", **kwargs):
    """Create the particle distribution for the 2D circular patch test.

    Particles are placed on a uniform grid covering [-1.05, 1.05]^2 with
    spacing `dx`; those outside the unit circle are then removed.
    Returns a particle array with mass, smoothing length, density,
    pressure, sound speed and the initial velocity field set.
    """
    x,y = numpy.mgrid[-1.05:1.05+1e-4:dx, -1.05:1.05+1e-4:dx]
    x = x.ravel()
    y = y.ravel()

    m = numpy.ones_like(x)*dx*dx       # particle mass from grid spacing
    h = numpy.ones_like(x)*2*dx        # smoothing length = 2*dx
    rho = numpy.ones_like(x)
    p = 0.5*1.0*100*100*(1 - (x**2 + y**2))
    cs = numpy.ones_like(x) * 100.0    # constant sound speed of 100
    u = -100*x
    v = 100*y

    # collect indices of particles falling outside the unit circle
    indices = []
    for i in range(len(x)):
        if numpy.sqrt(x[i]*x[i] + y[i]*y[i]) - 1 > 1e-10:
            indices.append(i)

    pa = base.get_particle_array(x=x, y=y, m=m, rho=rho, h=h, p=p, u=u, v=v,
                                 cs=cs,name=name, type=type,
                                 cl_precision=cl_precision)

    # remove the out-of-circle particles in one shot
    la = base.LongArray(len(indices))
    la.set_data(numpy.array(indices))
    pa.remove_particles(la)

    # renumber the surviving particles
    pa.set(idx=numpy.arange(len(pa.x)))

    print 'Number of particles: ', len(pa.x)

    return pa
class FluidSolver(Solver):
    """Solver preconfigured for the circular patch of fluid.

    Sets up the Tait equation of state, the continuity (density rate)
    equation and the momentum equation, followed by position stepping
    and XSPH velocity correction.
    """
    def setup_solver(self, options=None):
        #create the sph operation objects

        # equation of state: updates pressure and sound speed
        self.add_operation(SPHOperation(

            sph.TaitEquation.withargs(co=100.0, ro=1.0),
            on_types=[Fluids],
            updates=['p', 'cs'],
            id='eos')

                           )

        # continuity equation: integrates the density
        self.add_operation(SPHIntegration(

            sph.SPHDensityRate.withargs(hks=False),
            from_types=[Fluids], on_types=[Fluids],
            updates=['rho'],
            id='density')

                           )

        # momentum equation with artificial viscosity
        self.add_operation(SPHIntegration(

            sph.MomentumEquation.withargs(alpha=0.01, beta=0.0, hks=False),
            from_types=[Fluids], on_types=[Fluids],
            updates=['u','v','w'],
            id='mom')

                           )

        # position stepping and XSPH correction for the fluids
        self.add_operation_step([Fluids])
        self.add_operation_xsph(eps=0.1, hks=False)
#############################################################################
| Python |
from integrator import EulerIntegrator, RK2Integrator, RK4Integrator,\
PredictorCorrectorIntegrator, LeapFrogIntegrator
from cl_integrator import CLEulerIntegrator
from sph_equation import SPHIntegration, SPHOperation
from solver import Solver
from shock_tube_solver import ShockTubeSolver, ADKEShockTubeSolver,\
MonaghanShockTubeSolver, GSPHShockTubeSolver
from fluid_solver import FluidSolver, get_circular_patch
import shock_tube_solver, fluid_solver
from basic_generators import LineGenerator, CuboidGenerator, RectangleGenerator
from particle_generator import DensityComputationMode, MassComputationMode, \
ParticleGenerator
from application import Application
from post_step_functions import SaveCellManagerData
from plot import ParticleInformation
from utils import savez, savez_compressed, get_distributed_particles, mkdir, \
get_pickled_data, get_pysph_root, load
from cl_utils import HAS_CL, get_cl_devices, get_cl_include, \
get_scalar_buffer, cl_read, get_real, create_program,\
create_context_from_cpu, create_context_from_gpu, create_some_context,\
enqueue_copy, round_up, uint32mask
from time_step_functions import ViscousTimeStep, ViscousAndForceBasedTimeStep,\
VelocityBasedTimeStep
| Python |
#! /usr/bin/env python
# Author: Stefan Behnel <scoder@users.berlios.de>
# http://hg.cython.org/cython-devel/file/tip/Tools/cython-epydoc.py
#
# --------------------------------------------------------------------
import re
from epydoc import docstringparser as dsp
CYTHON_SIGNATURE_RE = re.compile(
# Class name (for builtin methods)
r'^\s*((?P<class>\w+)\.)?' +
# The function name
r'(?P<func>\w+)' +
# The parameters
r'\(((?P<self>(?:self|cls|mcs)),?)?(?P<params>.*)\)' +
# The return value (optional)
r'(\s*(->)\s*(?P<return>\w+(?:\s*\w+)))?' +
# The end marker
r'\s*(?:\n|$)')
parse_signature = dsp.parse_function_signature
def parse_function_signature(func_doc, doc_source,
                             docformat, parse_errors):
    # Temporarily swap epydoc's signature regex for the Cython-aware one,
    # attempt a parse, and fall back to the stock Python regex when the
    # Cython pattern finds nothing.
    PYTHON_SIGNATURE_RE = dsp._SIGNATURE_RE
    assert PYTHON_SIGNATURE_RE is not CYTHON_SIGNATURE_RE
    try:
        dsp._SIGNATURE_RE = CYTHON_SIGNATURE_RE
        found = parse_signature(func_doc, doc_source,
                                docformat, parse_errors)
        # restore before the fallback so the second attempt uses the
        # original Python regex
        dsp._SIGNATURE_RE = PYTHON_SIGNATURE_RE
        if not found:
            found = parse_signature(func_doc, doc_source,
                                    docformat, parse_errors)
        return found
    finally:
        # always restore the module-level regex, even if parsing raised
        dsp._SIGNATURE_RE = PYTHON_SIGNATURE_RE
dsp.parse_function_signature = parse_function_signature
# --------------------------------------------------------------------
from epydoc.cli import cli
cli()
# --------------------------------------------------------------------
| Python |
#!/usr/bin/env python
#
# Copyright 2011 Google Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Settings for the Au-to-do app."""
TEMPLATE_BASE_PATH = 'templates/'
# Prediction API credentials keyname.
CREDENTIALS_KEYNAME = 'prediction_credentials'
# OAuth 2.0 related constant.
CLIENT_ID = (
'your_client_id'
)
CLIENT_SECRET = 'your_client_secret'
# TODO(user): Make sure that all the scopes are included.
SCOPES = ['https://www.googleapis.com/auth/prediction']
USER_AGENT = 'au-to-do'
DOMAIN = 'anonymous'
# Whether or not to use memcache for caching of JSON models.
USE_MEMCACHE_FOR_JSON_MODELS = True
MEMCACHE_VERSION_PREFIX = '1-'
# Google Interoperable Access
GS_INTEROPERABLE_ACCESS = 'your_legacy_access_key'
GS_INTEROPERABLE_SECRET = 'your_legacy_access_secret'
GS_BUCKET = 'autodo-predictionmodels'
| Python |
#!/usr/bin/env python
#
# Copyright 2011 Google Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configures the API handler for the application."""
from datetime import datetime
import logging
from google.appengine.dist import use_library
use_library('django', '1.2')
from django.utils import simplejson
from apiclient import errors
from apiclient.discovery import build
from oauth2client import client
from google.appengine.api import users
from google.appengine.ext import db
from google.appengine.ext import webapp
from iso8601 import parse_date
from google.appengine.ext.webapp.util import run_wsgi_app
import httplib2
import model
import tasks_utils
class IncidentError(Exception):
  """Base class for any errors handling Incidents.

  Subclasses are translated into 4xx HTTP responses by IncidentHandler.
  """
  pass
class InvalidIncidentIdError(IncidentError):
  """Raised if a request with an invalid (non-integer) Incident ID is
  received."""
  pass
class InvalidListError(IncidentError):
  """Raised if a request is received with invalid list query parameters."""
  pass
class InvalidDateError(IncidentError):
  """Raised if a request is received with invalid date query parameters."""
  pass
class IncidentNotFoundError(IncidentError):
  """Raised if an Incident for the given ID cannot be found (HTTP 404)."""
  pass
class UserNotAuthenticatedError(IncidentError):
  """Raised if a request refers to a user that isn't authenticated
  (HTTP 401)."""
  pass
class UserSettingsError(Exception):
  """Base class for any errors handling UserSettings."""
  pass
class UserOrSettingsNotFoundError(UserSettingsError):
  """Raised if a User is not logged-in or has no settings (HTTP 404)."""
  pass
# TODO(user): Add admin settings as well in the API.
class UserSettingsHandler(webapp.RequestHandler):
  """Handles all RESTful operations on UserSettings."""

  def _HandleUserOrSettingsNotFoundError(self):
    """Handle an UserOrSettingsNotFoundError, yield an HTTP 404."""
    self.response.clear()
    self.response.set_status(404)
    self.response.out.write(
        'Settings for the user could not be found.')

  def _HandleBadRequestError(self):
    """Handle a bad request, yield an HTTP 400."""
    self.response.clear()
    self.response.set_status(400)
    self.response.out.write(
        'Bad request.')

  def _GetUserSettings(self):
    """Return the current user's settings.

    Returns:
      User's settings.

    Raises:
      UserOrSettingsNotFoundError: No user or settings were found.
    """
    user = users.get_current_user()
    settings = None
    if user:
      # Settings entities are keyed by the user's email address.
      settings = model.UserSettings.get_by_key_name(user.email())
    if settings:
      return settings
    else:
      raise UserOrSettingsNotFoundError()

  def _DumpResults(self, settings, task_lists=None):
    """Dumps the settings into a JSON object.

    Args:
      settings: Current user's settings.
      task_lists: Task lists to append to the result.
    """
    result = {
        'addToTasks': settings.add_to_tasks,
        'taskListId': settings.task_list_id,
        'taskLists': task_lists or []
    }
    self.response.out.write(simplejson.dumps(result))

  def _GetTaskLists(self, credentials):
    """Retrieve user's tasklists.

    Args:
      credentials: User's credentials.

    Returns:
      List of task lists.

    Raises:
      UserOrSettingsNotFoundError: Stored credentials could not be
          refreshed; treated upstream as missing settings.
    """
    try:
      service = build(
          'tasks', 'v1', http=credentials.authorize(httplib2.Http()))
      tasklists = service.tasklists().list().execute()
      return [{'id': x['id'], 'title': x['title']} for x in tasklists['items']]
    except client.AccessTokenRefreshError:
      raise UserOrSettingsNotFoundError()
    except errors.HttpError:
      # Tasks API failure is non-fatal: fall back to an empty list.
      return []

  def get(self):
    """Retrieve all settings for the current user."""
    try:
      settings = self._GetUserSettings()
      self._DumpResults(
          settings, self._GetTaskLists(settings.tasks_credentials))
    except UserOrSettingsNotFoundError:
      self._HandleUserOrSettingsNotFoundError()

  def put(self):
    """Update all provided settings for the current user."""
    try:
      settings = self._GetUserSettings()
      body = simplejson.loads(self.request.body)
      task_lists = self._GetTaskLists(settings.tasks_credentials)
      add_to_tasks = body.get('addToTasks')
      if add_to_tasks is not None:
        logging.info('Setting addToTasks to %s', add_to_tasks)
        settings.add_to_tasks = add_to_tasks
      task_list_id = body.get('taskListId')
      if task_list_id is not None:
        # Only accept a task list id that actually belongs to the user.
        found = False
        for task_list in task_lists:
          if task_list['id'] == task_list_id:
            found = True
            break
        if not found:
          raise ValueError()
        settings.task_list_id = task_list_id
      logging.info('Saving.')
      settings.put()
      self._DumpResults(settings, task_lists)
    except ValueError:
      # Raised both by malformed JSON and by an unknown task list id.
      self._HandleBadRequestError()
    except UserOrSettingsNotFoundError:
      self._HandleUserOrSettingsNotFoundError()
class IncidentHandler(webapp.RequestHandler):
"""Handles all RESTful operations on Incidents."""
def _GetIdFromUri(self):
"""Return the ID from the current request URI, or None if no ID found."""
parts = self.request.path.split('/')
if len(parts) == 5 and parts[-1]:
try:
return int(parts[-1])
except ValueError:
raise InvalidIncidentIdError()
return None
def _HandleInvalidIncidentIdError(self):
"""Handle an InvalidIncidentIdError, yield an HTTP 400."""
self.response.clear()
self.response.set_status(400)
self.response.out.write('The incident ID you provided is not an integer.')
def _HandleInvalidListError(self):
"""Handle an InvalidListError, yield an HTTP 400."""
self.response.clear()
self.response.set_status(400)
self.response.out.write(
'One of the list parameters in your request was not in CSV format.')
def _HandleInvalidDateError(self):
"""Handle an InvalidIncidentIdError, yield an HTTP 400."""
self.response.clear()
self.response.set_status(400)
self.response.out.write(('One of the date parameters in your request was '
'not in ISO 8601 format.'))
def _HandleUserNotAuthenticatedError(self):
"""Handle a UserNotAuthenticatedError, yield an HTTP 401."""
self.response.clear()
self.response.clear()
self.response.set_status(401)
self.response.out.write('One of the parameters refers to the current '
'user, but the user is not authenticated.')
def _HandleIncidentNotFoundError(self):
"""Handle an IncidentNotFoundError, yield an HTTP 404."""
self.response.clear()
self.response.set_status(404)
self.response.out.write(
'An incident with the provided ID could not be found.')
def _HandleUnknownError(self, log_message):
"""Handle an unknown error, yield an HTTP 500, and log the given message.
Args:
log_message: Message giving details about what happened.
"""
logging.error(log_message)
self.response.clear()
self.response.set_status(500)
self.response.out.write(
'An unknown error has occurred. Please try your request again.')
def _GetIncidentByUriId(self):
"""Try to find an Incident based on the ID in this request's URI.
Returns:
Found Incident, or None if no Incident was found.
Raises:
IncidentNotFoundError: Incident with given ID not found in datastore.
"""
incident_id = self._GetIdFromUri()
incident = model.Incident.get_by_id(incident_id)
if incident is None:
raise IncidentNotFoundError()
return incident
def get(self):
"""Retrieve either all incidents, or a single incident by ID.
If an ID is found in the current request URI, then look up that individual
Incident, and render it as output.
If no ID is found in the current request URI, render all Incidents as
output.
"""
try:
incident_id = self._GetIdFromUri()
if incident_id is not None:
self._GetById()
else:
self._GetAll()
except InvalidIncidentIdError:
self._HandleInvalidIncidentIdError()
def _GetById(self):
"""Render the Incident with the given ID to output."""
try:
incident = self._GetIncidentByUriId()
self.response.out.write(simplejson.dumps(
incident.GetDict(), default=IncidentHandler._DateToSerializable))
except IncidentNotFoundError:
self._HandleIncidentNotFoundError()
except db.Error, e:
self._HandleUnknownError(e)
@staticmethod
def _DateToSerializable(obj):
"""simplejson can't render dates. Helper method to fix that."""
if isinstance(obj, datetime):
return obj.isoformat()
@staticmethod
def _StrToDatetime(value):
"""Convert the given ISO formatted string into a datetime."""
return parse_date(value)
def _ApplyRequestFiltersToQuery(self, query):
"""Apply query parameters as query filters."""
# Only one field can be filtered with an inequality operation at a time.
inequality_field = None
for param, property_operator, func, inequality in INCIDENT_FILTERS:
if self.request.get(param) and (not inequality_field or
not inequality or
inequality_field == inequality):
func(query, property_operator, self.request.get(param))
inequality_field = inequality_field or inequality
return query
def _GetAll(self):
"""Render all Incidents to output."""
try:
incidents = self._ApplyRequestFiltersToQuery(model.Incident.all())
self.response.out.write(simplejson.dumps(
map(lambda i: i.GetDict(), incidents),
default=IncidentHandler._DateToSerializable))
except InvalidListError:
self._HandleInvalidListError()
except InvalidDateError:
self._HandleInvalidDateError()
except UserNotAuthenticatedError:
self._HandleUserNotAuthenticatedError()
except db.Error, e:
self._HandleUnknownError(e)
def post(self):
"""Create the Incident described by the request POST body."""
try:
incident = model.Incident.FromJson(self.request.body)
incident.id = None
incident.put()
tasks_utils.AddTask(incident)
model.Tag.CreateMissingTags(incident)
self.response.set_status(201)
except db.Error, e:
self._HandleUnknownError(e)
def put(self):
"""Update the Incident described by the request body."""
try:
incident = self._GetIncidentByUriId()
new_incident = model.Incident.FromJson(self.request.body)
incident.Overlay(new_incident)
incident.put()
incident.PurgeJsonCache()
tasks_utils.UpdateTask(incident)
model.Tag.CreateMissingTags(incident)
self.response.set_status(204)
except IncidentNotFoundError:
self._HandleIncidentNotFoundError()
except db.Error, e:
self._HandleUnknownError(e)
def delete(self):
"""Delete the Incident with the given ID."""
try:
incident = self._GetIncidentByUriId()
tasks_utils.DeleteTask(incident)
incident.delete()
self.response.set_status(204)
except IncidentNotFoundError:
self._HandleIncidentNotFoundError()
except db.Error, e:
self._HandleUnknownError(e)
@staticmethod
def ApplyFilter(query, property_operator, value):
"""Apply a single filter to the model query.
Args:
query: App Engine model query to apply the filters to.
property_operator: String containing the property name, and an optional
comparison operator.
value: Single filter.
Returns:
Query.
"""
query.filter(property_operator, value)
return query
@staticmethod
def ApplyPersonalFilter(query, property_operator, value):
"""Apply a single filter to the model query, replacing the value of 'me'.
Args:
query: App Engine model query to apply the filters to.
property_operator: String containing the property name, and an optional
comparison operator.
value: Single filter, possibly containing 'me' referring to the current
user.
Returns:
Query.
Raises:
UserNotAuthenticatedError: User referenced by me is not authenticated.
"""
if value == 'me':
user = users.get_current_user()
if user:
value = user.email()
else:
raise UserNotAuthenticatedError
query.filter(property_operator, value)
return query
@staticmethod
def ApplyListFilter(query, property_operator, filters):
"""Apply a list of filter to the model query.
Args:
query: App Engine model query to apply the filters to.
property_operator: String containing the property name, and an optional
comparison operator.
filters: Comma seperated string.
Returns:
Query.
Raises:
InvalidListError.
"""
try:
for tag in filters.split(','):
if tag:
query.filter(property_operator, tag)
except ValueError:
raise InvalidListError
return query
@staticmethod
def ApplyDateFilter(query, property_operator, date):
"""Apply a date filter to the model query.
Args:
query: App Engine model query to apply the filters to.
property_operator: String containing the property name, and an optional
comparison operator.
date: String representing a date value.
Returns:
Query.
Raises:
InvalidListError.
"""
try:
query.filter(property_operator, parse_date(date))
except TypeError:
raise InvalidDateError
except ValueError:
raise InvalidDateError
return query
# Incident filters to be used in IncidentHandler._ApplyRequestFiltersToQuery
# method and in landing.LandingPage to display the filters.
# List of tuples consisting of:
# * Query parameter.
# * Property operator on the model query.
# * Function to apply the filter to the model query (e.g _ApplyFilter).
# * The property name on which an inequality operation is applied. None if
# operation is not an inequality operation.
INCIDENT_FILTERS = [
    # (query parameter, property operator, apply function, inequality field)
    ('accepted_tags', 'accepted_tags = ', IncidentHandler.ApplyListFilter,
     None),
    ('suggested_tags', 'suggested_tags = ', IncidentHandler.ApplyListFilter,
     None),
    ('owner', 'owner = ', IncidentHandler.ApplyPersonalFilter, None),
    ('status', 'status = ', IncidentHandler.ApplyFilter, None),
    ('created_before', 'created < ', IncidentHandler.ApplyDateFilter,
     'created'),
    ('created_after', 'created > ', IncidentHandler.ApplyDateFilter, 'created'),
    ('updated_before', 'updated < ', IncidentHandler.ApplyDateFilter,
     'updated'),
    ('updated_after', 'updated > ', IncidentHandler.ApplyDateFilter, 'updated'),
    ('resolved_before', 'resolved < ', IncidentHandler.ApplyDateFilter,
     'resolved'),
    ('resolved_after', 'resolved > ', IncidentHandler.ApplyDateFilter,
     'resolved'),
    ]
def main():
  """Runs the application."""
  routes = [
      ('/resources/v1/incidents.*', IncidentHandler),
      ('/resources/v1/userSettings', UserSettingsHandler),
  ]
  run_wsgi_app(webapp.WSGIApplication(routes, debug=True))
if __name__ == '__main__':
main()
| Python |
#!/usr/bin/env python
| Python |
#!/usr/bin/env python
#
# Copyright 2011 Google Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime
import logging
import re
import string
import StringIO
import unicodedata
from apiclient import errors
from apiclient.discovery import build
from oauth2client import client
from google.appengine.ext import db
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
import gslite
import httplib2
import model
import settings
def CleanText(text, quote=False):
  """Transform a string into a string of terms suitable for a training set.

  The Prediction API treats each word as a separate term, so make all
  words lower case and remove all punctuation. This is one area where
  experimenting with pre-processing can yield different prediction
  fidelity, so it is likely that this function should be updated for
  specialized parsing.

  This implementation converts everything to ASCII.

  Args:
    text: A string to be cleaned.
    quote: True if you want the results to be quoted.

  Returns:
    A string suitable for use in a .csv
  """
  # Every whitespace and punctuation character is replaced by a space below.
  to_remove = string.whitespace + string.punctuation
  # NFKD-decompose, then force ASCII; unmappable characters become '?'.
  # NOTE(review): under Python 2, encode() yields a (byte) str that the
  # regex substitutions below operate on — this module is Python 2 only.
  text = unicodedata.normalize('NFKD', text).encode('ascii', 'replace')
  replace = re.compile('[%s]' % re.escape(to_remove))
  new_text = replace.sub(' ', text)
  # Collapse the runs of spaces introduced by the substitution above.
  new_text = re.sub(' +', ' ', new_text)
  new_text = new_text.lower()
  if quote:
    new_text = '\"%s\"' % new_text
  return new_text
def ConcatenateMessages(incident):
  """Find all the Messages attached to an Incident and return bodies.

  Args:
    incident: Incident instance which is the parent of the Messages.

  Returns:
    A string, possibly very long, containing all the text from all the
    Messages attached to an Incident.
  """
  bodies = [msg.body for msg in
            model.Message.gql('WHERE incident = :1', incident.key())]
  return ' '.join(bodies)
def RefreshTagsAndModels():
  """Update all the Tags and SuggestionModel.ui_tags.

  Ensures a Tag entity exists for every tag on every Incident, then
  ensures a SuggestionModel exists for every Tag.
  """
  for incident in model.Incident.all():
    model.Tag.CreateMissingTags(incident)
  for tag in model.Tag.all():
    model.SuggestionModel.CreateMissingModel(tag.name)
def BuildCSVRow(incident, tag=None, recycled=None):
  """Create an example row suitable for a training CSV file or streaming.

  This incident makes some expensive calls on text processing and
  data retrieval, so it returns all of the processed data in an
  "opaque" dictionary. You can optionally pass this dictionary back
  in to the function through recycled if you would like to save on
  processing.

  Args:
    incident: a model.Incident to parse.
    tag: String form of the tag name. If present it will always be
      the first element of the returned string (per the Prediction
      API training format).
    recycled: "opaque" dictionary (for use only by this
      function). Modified by reference, you should pass this object
      back in if you are processing the same incident for multiple
      tags.

  Returns:
    String suitable for a row in the prediction (no tag) or
    training stream (with tag).
  """
  # Bug fix: the original tested "if not recycled", which rebound a
  # caller-supplied *empty* dict to a fresh local one, so the caller's
  # cache was never populated and the documented recycling never worked.
  # Only substitute a new dict when no dict was passed at all.
  if recycled is None:
    recycled = {}
  if 'body' not in recycled:
    # Expensive: datastore fetch plus text normalization. Cache both
    # fields in the caller-visible dict so repeat calls skip this.
    recycled['body'] = CleanText(ConcatenateMessages(incident))
    recycled['title'] = CleanText(incident.title)
  row_items = [recycled['title'], recycled['body']]
  if tag:
    # Tag should not be CleanText'd because it must match exactly.
    row_items.insert(0, tag)
  return ','.join('\"%s\"' % item for item in row_items)
def BuildCSVTrainingSet(model_name, write_file, tag_counts, training=False):
  """Create a training set containing every example of a model.

  Args:
    model_name: String form of the model name.
    write_file: IO-based object with 'write' method.
    tag_counts: Dictionary of tags and a count of their examples.
      Modified in place: counts are incremented per example written.
    training: True if this CSV will be used immediately for training,
      to update the incident statistics

  Returns:
    Tuple containing:
      * the set of tags added to the training set.
      * the list of incident used for training.
  """
  added_tags = set()
  trained_incidents = []
  # Collect the names of every Tag belonging to this model.
  model_tags = []
  tags = model.Tag.all()
  for tag in tags:
    if model.Tag.ModelMatches(model_name, tag.name):
      model_tags.append(tag.name)
  incidents = model.Incident.all()
  # TODO(user) Add a filter to debounce incidents which have been
  # updated recently. The user may still be making changes.
  # Note: "IN" queries are limited to 30 list elements (sub-queries)!
  if len(model_tags) > 30:
    logging.error('There are too many tags in %s to query with a single IN.',
                  model_name)
  incidents.filter('accepted_tags IN', model_tags)
  for incident in incidents:
    # Per-incident cache shared across tags so BuildCSVRow only cleans
    # the message text once per incident.
    processed_incident = {}
    for tag in incident.accepted_tags:
      if tag in model_tags:
        added_tags.add(tag)
        if training:
          incident.trained_tags.append(tag)
        # One CSV row per (incident, tag) pair, tag first.
        write_file.write(BuildCSVRow(incident, tag=tag,
                                     recycled=processed_incident))
        write_file.write('\n')
        if tag in tag_counts:
          tag_counts[tag] += 1
        else:
          tag_counts[tag] = 1
    trained_incidents.append(incident)
    if training:
      # Dedupe the tags appended above; record when training happened.
      # NOTE(review): the incident is mutated but not put() here —
      # presumably the caller is responsible for saving it. Confirm.
      incident.trained_tags = list(set(incident.trained_tags))
      incident.updated = datetime.utcnow()
      incident.trained_date = incident.updated
      # incident.training_review should remain unchanged because we
      # only checked one model. This incident may belong to multiple
      # models, some of which have already been trained.
  return (added_tags, trained_incidents)
class Suggester(webapp.RequestHandler):
  """Learn and suggest tags for Incidents.

  Learn from user-provided tags ("Accepted" tags)
  and suggest tags for Incidents as Messages arrive.
  """

  def _SuggestTags(self, key, service):
    """Get suggestions for tags from the Prediction API.

    Updates the Incident with one suggested tag for each model.

    Args:
      key: Model Key for Incident to receive suggested tags.
      service: Built API service class, pre-authorized for OAuth.
    """
    incident = db.get(key)
    if not incident:
      logging.error('_SuggestTags: No Incident with id=' + key)
    else:
      # One prediction request is made per trained model, all sharing
      # the same CSV instance built from this incident's text.
      csv_instance = BuildCSVRow(incident)
      sample = {'input': {'csvInstance': [csv_instance]}}
      model_list = model.SuggestionModel.all()
      suggested = []
      for suggestion_model in model_list:
        # Skip models that have never been trained (no examples).
        if suggestion_model.training_examples:
          prediction = service.trainedmodels().predict(
              id=suggestion_model.training_file, body=sample).execute()
          logging.info('Model:%s Prediction=%s', suggestion_model.name,
                       prediction)
          # Only add labels that are not already assigned to the incident
          if prediction['outputLabel'] not in incident.accepted_tags:
            suggested.append(prediction['outputLabel'])
      if suggested:
        # Replace (not merge) the previous suggestions and persist.
        incident.suggested_tags = suggested
        logging.info('_SuggestTags: Final Suggestions=%s', ','.join(suggested))
        incident.PurgeJsonCache()
        incident.updated = datetime.utcnow()
        incident.put()
        model.Tag.CreateMissingTags(incident)

  def post(self):
    """Handle a POST request by returning suggestions from the prediction API.

    POST Parameters:
      incident_key: String form of Incident Key.

    Returns:
      Nothing. Modifies Incident.suggested_tags.
    """
    logging.info('Suggester.post')
    incident_key = self.request.get('incident_key')
    if not incident_key:
      logging.error('No incident_key provided')
      return
    else:
      incident_key = db.Key(incident_key)
    # Application-wide stored OAuth credentials for the Prediction API.
    credentials = model.Credentials.get_by_key_name(
        settings.CREDENTIALS_KEYNAME)
    if credentials:
      credentials = credentials.credentials
    # NOTE(review): if no stored credentials exist, credentials is None
    # and the authorize() call below will raise — confirm intended.
    http = httplib2.Http()
    http = credentials.authorize(http)
    service = build('prediction', 'v1.4', http=http)
    self._SuggestTags(incident_key, service)
class Trainer(webapp.RequestHandler):
  """Make Examples and train the Prediction Engine from the Examples."""

  def _UpdateTraining(self, training):
    """Update the Prediction API training model with new models and examples.

    Two phases: (1) stream new examples into models that are already
    trained; (2) build and upload a full CSV training set for models
    that have no examples yet.

    Args:
      training: The Prediction API training service, already authorized.
    """
    # Phase 1 setup: map already-trained model names to their training files.
    trained_model_query = db.GqlQuery('SELECT * FROM SuggestionModel '
                                      'WHERE training_examples > 0')
    trained_model_names = {}
    for trained_model in trained_model_query:
      trained_model_names[trained_model.name] = trained_model.training_file
      logging.info('TRAINED MODEL=%s', trained_model.name)
    # Note on Query design: I originally wanted to select where
    # updated>trained, but the right value (trained) cannot be another
    # column in the Incident, it must be a constant. Instead I
    # created a new field, training_review, which is True when
    # training should look at the Incident for changes and False when
    # the Incident has been processed.
    # TODO(user): optimize training_review so that it is only set
    # when tags change. Right now it is set whenever the Incident is
    # updated.
    updated_incidents = db.GqlQuery('SELECT * FROM Incident '
                                    'WHERE training_review = TRUE')
    for updated_incident in updated_incidents:
      if updated_incident.title:
        logging.info('UPDATED INCIDENT = ' + updated_incident.title)
      # Cache shared across tags so the incident text is cleaned once.
      processed_incident = {}
      # Tags accepted by a user but not yet streamed to the API.
      new_tags = (set(updated_incident.accepted_tags) -
                  set(updated_incident.trained_tags))
      for new_tag in new_tags:
        new_tag_model = model.Tag.ModelCategory(new_tag)['model']
        if new_tag_model in trained_model_names:
          example = BuildCSVRow(updated_incident, tag=new_tag,
                                recycled=processed_incident)
          logging.info('%s\n\tROW = %s', trained_model_names[new_tag_model],
                       example)
          current_model = model.SuggestionModel.get_by_key_name(new_tag_model)
          gs_full_name = '%s/%s' % (settings.GS_BUCKET,
                                    current_model.training_file)
          csv_instance = {'label': new_tag, 'csvInstance': [example]}
          # TODO(user) Check training result for success.
          try:
            training.update(
                id=current_model.training_file, body=csv_instance).execute()
            updated_incident.trained_tags.append(new_tag)
          except errors.HttpError, error:
            # "Training running" just means the model is busy; anything
            # else marks the model untrained so phase 2 rebuilds it.
            if 'Training running' not in error.content:
              # Trained model insert failed, reset the training status for this
              # tag.
              logging.error(
                  'Failed to retrieve trained model %s', new_tag_model)
              current_model.training_examples = 0
              current_model.put()
          except client.AccessTokenRefreshError:
            logging.error('Failed to update training set %s', gs_full_name)
      # Mark the incident as processed regardless of per-tag outcomes.
      updated_incident.trained_tags = list(set(updated_incident.trained_tags))
      updated_incident.training_review = False
      updated_incident.put()
    # Go through the untrained models second because they can ignore the
    # training_review flag.
    untrained_models = db.GqlQuery('SELECT * FROM SuggestionModel '
                                   'WHERE training_examples = 0')
    storage = gslite.GsClient(access_key=settings.GS_INTEROPERABLE_ACCESS,
                              secret=settings.GS_INTEROPERABLE_SECRET)
    tag_counts = {}
    for untrained_model in untrained_models:
      logging.info('UNTRAINED MODEL = ' + untrained_model.name)
      string_file = StringIO.StringIO()
      tags, trained_incidents = BuildCSVTrainingSet(
          untrained_model.name, string_file, tag_counts, training=True)
      # The Prediction API needs at least two labels to train a model.
      if len(tags) > 1:
        gs_object_name = untrained_model.name
        gs_full_name = '%s/%s.csv' % (settings.GS_BUCKET, gs_object_name)
        body = {
            'id': gs_object_name,
            'storageDataLocation': gs_full_name
        }
        # Upload the CSV to Google Storage, then ask the Prediction API
        # to train from it.
        storage.put_object(
            settings.GS_BUCKET, gs_object_name + '.csv', string_file,
            extra_headers={'x-goog-acl': 'project-private'})
        string_file.close()
        # TODO(user) check result for success
        training.insert(body=body).execute()
        untrained_model.training_file = gs_object_name
        untrained_model.training_date = datetime.utcnow()
        untrained_model.training_examples = len(trained_incidents)
        untrained_model.training_tags = tag_counts.keys()
        untrained_model.put()
        # Persist the trained_tags/trained_date mutations made by
        # BuildCSVTrainingSet(training=True).
        for incident in trained_incidents:
          incident.put()
    # Update the statistics in the related Tag
    for tag in tag_counts:
      tag_object = model.Tag.get_by_key_name(tag)
      tag_object.example_count = tag_counts[tag]
      tag_object.trained_count = tag_counts[tag]
      tag_object.trained_date = datetime.utcnow()
      tag_object.put()

  def _DownloadCSV(self, model_name):
    """Generate a csv file suitable for use as a training set.

    Provides download file and updates Tags in datastore.

    Args:
      model_name: model.name. All Accepted tags for this model will be
        processed to create one training set.
    """
    now = datetime.utcnow()
    suggestion_model = model.SuggestionModel.get_by_key_name(model_name)
    # Timestamped filename so repeated exports do not collide.
    suggestion_model.export_file = '%s-%s.csv' % (model_name, now.isoformat())
    disposition = 'attachment; filename=%s' % suggestion_model.export_file
    self.response.headers['Content-Type'] = 'text/csv'
    self.response.headers['Content-Disposition'] = disposition
    tag_counts = {}
    temp_file = StringIO.StringIO()
    # training=False: exporting must not mutate incident statistics.
    _, trained_incidents = BuildCSVTrainingSet(model_name, temp_file,
                                               tag_counts)
    self.response.out.write(temp_file.getvalue())
    temp_file.close()
    # Update the statistics in the related Tag
    for tag in tag_counts:
      tag_object = model.Tag.get_by_key_name(tag)
      tag_object.example_count = tag_counts[tag]
      tag_object.put()
    # Update the statistics in the SuggestionModel
    suggestion_model.export_date = now
    suggestion_model.export_tags = tag_counts.keys()
    suggestion_model.ui_tags = suggestion_model.export_tags
    suggestion_model.export_examples = len(trained_incidents)
    suggestion_model.put()

  def _Refresh(self):
    """Force a new training set for all tags."""
    RefreshTagsAndModels()
    credentials = model.Credentials.get_by_key_name(
        settings.CREDENTIALS_KEYNAME)
    if credentials:
      credentials = credentials.credentials
    # NOTE(review): credentials may be None here, which would make
    # authorize() raise — confirm stored credentials always exist.
    http = httplib2.Http()
    http = credentials.authorize(http)
    service = build('prediction', 'v1.4', http=http)
    train = service.trainedmodels()
    self._UpdateTraining(train)

  def get(self):
    """Private endpoint for Cron job automatic training."""
    # App Engine sets this header only on genuine cron requests.
    if self.request.headers.get('X-AppEngine-Cron') == 'true':
      logging.info('Refreshing tags training set from Cron job.')
      self._Refresh()
    else:
      self.redirect('/')

  def post(self):
    """Process requests to train or for training data.

    Possible requests:
      action=refresh: force a new training set for all tags with sufficient
        Examples. Creates models as needed.
      action=csv: download a comma-separated version of the given model.
    """
    action = self.request.get('action')
    model_name = self.request.get('model_name')
    if action == 'csv':
      self._DownloadCSV(model_name)
    elif action == 'refresh':
      self._Refresh()
      self.redirect('/')
def main():
  """Serves the training and suggestion task endpoints."""
  routes = [
      ('/tasks/train', Trainer),
      ('/tasks/suggest', Suggester),
  ]
  run_wsgi_app(webapp.WSGIApplication(routes))


if __name__ == '__main__':
  main()
| Python |
#!/usr/bin/env python
#
# Copyright 2011 Google Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configures all page handlers for the application."""
from google.appengine.dist import use_library
use_library('django', '1.2')
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
from landing import LandingPage
from user_settings import UserSettingsPage
# URL routes for the user-facing pages.
_ROUTES = [
    ('/', LandingPage),
    ('/settings', UserSettingsPage),
]
application = webapp.WSGIApplication(_ROUTES, debug=True)


def main():
  """Dispatches CGI requests to the module-level application."""
  run_wsgi_app(application)


if __name__ == '__main__':
  main()
| Python |
#!/usr/bin/env python
#
# Copyright 2011 Google Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Datastore reset page for the application."""
from datetime import datetime
import re
from google.appengine.api import users
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
import model
class DatastoreResetPage(webapp.RequestHandler):
  """Handler for datastore reset requests.

  Deletes every Incident and Message and repopulates the datastore
  with a small set of fixture incidents. Only runs when the request
  host is not an appspot.com (deployed) instance.
  """

  def post(self):
    """Resets the datastore with fixture data (local development only)."""
    # Safety check: never wipe data on a deployed (appspot) instance.
    if not re.search('(appspot)', self.request.host):
      for incident in model.Incident.all():
        incident.delete()
      for message in model.Message.all():
        message.delete()
      user = users.get_current_user()
      # Create an incident for the user.
      self.CreateIncident('Incident for ' + user.nickname(), user.nickname())
      # Creates an unassigned incident.
      self.CreateIncident('Unassigned incident')
      # Creates an incident assigned to 'some_user' if one doesn't exist.
      # Bug fix: the original used "is not", an identity comparison that
      # is implementation-dependent for equal strings; use != instead.
      if user.nickname() != 'some_user':
        self.CreateIncident('Incident for some_user',
                            owner='some_user')
      # Creates an incident with the accepted tag of 'API-Test'.
      self.CreateIncident('API-Test', accepted_tags=['API-Test'])
      # Creates an incident with the accepted tag of 'Special-ToAssignTag'.
      self.CreateIncident('To assign tag',
                          accepted_tags=['Special-ToAssignTag'])
      # Creates an incident to be resolved.
      self.CreateIncident('To resolve', accepted_tags=['Special-ToResolve'])
      # Creates a resolved incident.
      self.CreateIncident('Resolved', status='resolved')

  def CreateIncident(self, title, owner='none', accepted_tags=None,
                     suggested_tags=None, status='new'):
    """Creates an incident with limited customization.

    Args:
      title: Title of the incident
      owner: Optionally specifies the owner of the incident.
      accepted_tags: Optional list of accepted_tags applied to the incident.
      suggested_tags: Optional list of suggested_tags applied to the incident.
      status: Optional string status for the new incident.
    """
    # Set empty tags outside of the default constructor, in case we ever need
    # to modify these later.
    if not accepted_tags:
      accepted_tags = []
    if not suggested_tags:
      suggested_tags = []
    incident = model.Incident()
    incident.title = title
    incident.created = datetime.now()
    incident.status = status
    incident.owner = owner
    incident.author = 'test@example.com'
    incident.mailing_list = 'support@example.com'
    incident.canonical_link = 'http://google.com'
    incident.suggested_tags = suggested_tags
    incident.accepted_tags = accepted_tags
    incident.put()
    self.CreateMessages(incident)

  def CreateMessages(self, incident):
    """Creates messages associated with the supplied incident.

    Args:
      incident: Incident to which messages should be appended.
    """
    in_reply_to = None
    # Each message replies to the previous one, forming a short thread.
    for j in range(2):
      message = model.Message()
      message.title = 'Message #' + str(j)
      message.incident = incident
      message.in_reply_to = in_reply_to
      # Bug fix: call incident.key() — the original interpolated the
      # bound method object itself, producing '<bound method ...>' ids.
      message.message_id = 'message-%s-%s' % (incident.key(), str(j))
      message.author = 'text@example.com'
      message.body = 'Text'
      message.sent = datetime.now()
      message.mailing_list = 'support@example.com'
      message.canonical_link = 'http://google.com'
      message.put()
      in_reply_to = message.message_id
application = webapp.WSGIApplication(
    [('/ds_reset', DatastoreResetPage)],
    debug=True)


def main():
  """Dispatches CGI requests to the module-level application."""
  run_wsgi_app(application)


if __name__ == '__main__':
  main()
| Python |
#!/usr/bin/env python
#
# Copyright 2011 Google Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provides utility functions for Tasks API."""
import logging
import traceback
from apiclient import errors
from apiclient.discovery import build
from oauth2client.appengine import StorageByKeyName
import httplib2
import model
def _BuildClient(credentials):
  """Build a Tasks client.

  Args:
    credentials: Credentials used to authorized requests.

  Returns:
    Tasks API client.
  """
  transport = httplib2.Http()
  if credentials:
    transport = credentials.authorize(transport)
  return build('tasks', 'v1', http=transport)
def _GetCredentialsAndSettings(user_email):
  """Retrieve the user's credentials and settings.

  Args:
    user_email: Email of the user to retrieve settings and credentials for.

  Returns:
    User's credentials and settings as a tuple.
  """
  user_settings = model.UserSettings.get_by_key_name(user_email)
  if not user_settings:
    return None, user_settings
  storage = StorageByKeyName(model.UserSettings, user_email,
                             'tasks_credentials')
  return storage.get(), user_settings
def AddTask(incident, client=None):
  """Retrieve the owner's settings and add a task if requested.

  Args:
    incident: Incident to add the task for.
    client: TasksClient to use for Tasks API requests.
  """
  # 'none' is the sentinel this app uses for an unowned incident.
  if incident.owner and incident.owner != 'none':
    credentials, settings = _GetCredentialsAndSettings(incident.owner)
    # Only act if the owner opted in to Tasks integration.
    if credentials and settings.add_to_tasks:
      client = client or _BuildClient(credentials)
      key_name = '%s' % incident.key().id()
      incident_task = model.IncidentTask.get_by_key_name(key_name)
      body = incident.ToTaskDict()
      try:
        task = client.tasks().insert(
            tasklist=settings.task_list_id, body=body).execute()
      except errors.HttpError:
        logging.error(
            'Exception occured inserting task for incident %s',
            incident.key().id())
        # format_exc ends in a newline; trim it for the log.
        logging.error(traceback.format_exc()[:-1])
        if incident_task:
          # Task could not be added, remove reference.
          incident_task.delete()
      else:
        # Insert succeeded: create or update the local task reference.
        if not incident_task:
          incident_task = model.IncidentTask(key_name=key_name)
        incident_task.task_id = task['id']
        incident_task.task_list_id = settings.task_list_id
        incident_task.owner = incident.owner
        incident_task.put()
def RemoveTask(incident, client=None):
  """Retrieve the owner's settings and delete the incident's task if existing.

  Args:
    incident: Incident to remove the task for.
    client: TasksClient to use for Tasks API requests.
  """
  incident_task = model.IncidentTask.get_by_key_name(
      '%s' % incident.key().id())
  if incident_task:
    # Use the task's recorded owner (incident.owner may have changed).
    credentials, settings = _GetCredentialsAndSettings(incident_task.owner)
    if credentials and settings.add_to_tasks:
      client = client or _BuildClient(credentials)
      try:
        client.tasks().delete(
            tasklist=incident_task.task_list_id,
            task=incident_task.task_id).execute()
      except errors.HttpError:
        # Best effort: log and fall through to delete the local reference.
        logging.error(
            'Exception occured while deleting task %s - %s',
            incident_task.task_list_id, incident_task.task_id)
        logging.error(traceback.format_exc()[:-1])
    else:
      logging.warning(
          'No owner or credentials found for IncidentTask %s',
          incident.key().id())
    # The local reference is removed whether or not the remote delete worked.
    incident_task.delete()
  else:
    logging.warning(
        'No IncidentTask found for incident %s', incident.key().id())
def UpdateTask(incident, old_client=None, new_client=None):
  """Update Task information on an updated incident.

  Args:
    incident: New version of the incident.
    old_client: TasksClient to use for Tasks API requests.
    new_client: TasksClient to use for Tasks API requests.
  """
  incident_task = model.IncidentTask.get_by_key_name(
      '%s' % incident.key().id())
  if not incident_task:
    # No task recorded yet: treat the update as an add.
    AddTask(incident, new_client)
  else:
    # Credentials of the owner the task was created for.
    old_credentials = _GetCredentialsAndSettings(incident_task.owner)[0]
    old_client = old_client or _BuildClient(old_credentials)
    if incident_task.owner == incident.owner:
      # Same owner: refresh the existing task in place.
      if old_credentials:
        try:
          old_task = old_client.tasks().get(
              tasklist=incident_task.task_list_id,
              task=incident_task.task_id).execute()
          # Merge the incident's current state into the fetched task.
          old_task = incident.ToTaskDict(old_task)
          old_client.tasks().update(
              tasklist=incident_task.task_list_id, task=incident_task.task_id,
              body=old_task).execute()
        except errors.HttpError:
          logging.error(
              'Exception occured while retrieving or updating task %s - %s',
              incident_task.task_list_id, incident_task.task_id)
          logging.error(traceback.format_exc()[:-1])
      else:
        # Owner revoked access: drop the stale local reference.
        logging.warning(
            'No credentials found for IncidentTask #%s',
            incident_task.key().id())
        incident_task.delete()
    else:
      # If the owner changed, delete the task for the previous owner.
      if old_credentials:
        try:
          old_client.tasks().delete(
              tasklist=incident_task.task_list_id,
              task=incident_task.task_id).execute()
        except errors.HttpError:
          logging.error(
              'Exception occured while deleting task %s - %s',
              incident_task.task_list_id, incident_task.task_id)
          logging.error(traceback.format_exc()[:-1])
      else:
        logging.warning(
            'No credentials found for IncidentTask #%s',
            incident_task.key().id())
      # Then add a task for the new owner, if they opted in; otherwise
      # just remove the now-orphaned reference.
      new_credentials, new_settings = _GetCredentialsAndSettings(
          incident.owner)
      if new_credentials and new_settings.add_to_tasks:
        AddTask(incident, new_client)
      else:
        incident_task.delete()
| Python |
#!/usr/bin/env python
#
# Copyright 2010 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
"""Simple, extendable, mockable Python client for Google Storage.
This module only depends on standard Python libraries. It is intended to provide
a set of base client classes with all critical features implemented. Advanced
features can be added by extending the classes. Or, it can be used as-is.
Installation:
Put this script in your python path.
Usage:
1) Get a Google Storage account and credentials.
2) Put this script in your Python path.
3) Decide how you will store your credentials (private file, environment
variables, etc...).
4) Create a GsClient or child instance, passing credentials to constructor.
5) Use the relevant functions on the client.
URL Encoding:
Users of this module do not need to URL encode/decode any request arguments
or response results.
Object names and query parameters may contain characters that are illegal
URL characters. So, all object name and query parameter values are
percent encoded by this module before sending the request. This is important
to understand since you do not want to encode your strings twice.
It is also important to understand that all object names and prefixes
found in ListBucketResult responses will not be encoded.
Handling Errors:
Google Storage service errors will be raised as GsError exceptions.
Other connection errors may get raised as httplib.HTTPException errors.
Windows Considerations:
When opening files, you must specify binary mode, like this:
infile = open(filename, 'rb')
outfile = open(filename, 'wb')
Example where credentials are in GS_ACCESS and GS_SECRET env vars:
$ python
>>> import os
>>> import gslite
>>> gs_access = os.environ['GS_ACCESS']
>>> gs_secret = os.environ['GS_SECRET']
>>> bucket = 'my_super_cool_bucket_name'
>>> filename = 'hello.txt'
>>> client = gslite.GsClient(access_key=gs_access, secret=gs_secret)
>>> client.put_bucket(bucket)
>>> infile = open(filename)
>>> client.put_object(bucket, filename, infile)
>>> infile.close()
>>> client.get_bucket(bucket).get_keys()
['hello.txt']
>>> client.delete_object(bucket, filename)
>>> client.delete_bucket(bucket)
"""
__version__ = '1.0'
import base64
import hashlib
import hmac
import httplib
import logging
import os
import StringIO
import time
import urllib
import urlparse
import xml.dom.minidom
# Success and retryable status codes.
# Grouped per operation because Google Storage signals success and
# retryability with different HTTP codes for different request types.
REDIRECT_CODES = (301, 302, 303, 307)
DEFAULT_SUCCESS_CODES = (200,)
DEFAULT_RETRYABLE_CODES = (408, 500, 502, 503, 504)
GET_OBJECT_SUCCESS_CODES = (200, 206)  # 206: partial content (range read)
DEL_BUCKET_SUCCESS_CODES = (204,)
DEL_BUCKET_RETRYABLE_CODES = (404, 408, 409, 500, 502, 503, 504)
DEL_OBJECT_SUCCESS_CODES = (204,)
class GsError(Exception):
  """Base error for all client errors.

  Instance data:
    msg: error message
    operations: list of operations associated with error
  """

  def __init__(self, msg, operations):
    """GsError constructor.

    Args:
      msg: message string
      operations: list of operations associated with error.
    """
    self.msg = msg
    self.operations = operations

  def __str__(self):
    """Convert instance to loggable string.

    Output is 'GsError: <msg>' followed by one numbered section per
    operation. Idiom fix: uses enumerate and ''.join instead of the
    original index-into-list loop writing to a StringIO buffer; the
    rendered string is identical.
    """
    parts = ['GsError: %s' % self.msg]
    for i, operation in enumerate(self.operations):
      parts.append('\n\nOPERATION %d:' % i)
      parts.append('\n%s' % operation)
    return ''.join(parts)
class GsXmlBase(object):
  """Base XML object parser/generator."""

  @staticmethod
  def value_from_elems(elems):
    """Returns a child node text value in the last element in elems.

    Args:
      elems: A list of Element objects from the xml.dom.minidom module.

    Returns:
      String value of last node or empty string if not found.
    """
    if not elems:
      return ''
    children = elems[-1].childNodes
    if not children:
      return ''
    return str(children[-1].nodeValue)

  @staticmethod
  def add_text_node(dom, parent_node, node_name, node_text):
    """Adds a simple text node to a parent node.

    Args:
      dom: dom object from xml.dom.minidom module.
      parent_node: Parent Node object from the xml.dom.minidom module.
      node_name: Name of new child node
      node_text: Text content of new node.
    """
    child = dom.createElement(node_name)
    child.appendChild(dom.createTextNode(node_text))
    parent_node.appendChild(child)
class GsAccessControlList(GsXmlBase):
  """AccessControlList XML parser/generator.

  See the Google Storage API documentation for more information about the
  AccessControlList XML specification.

  Instance data:
    owner_id: owner google storage id as string
    owner_name: owner name as string
    entries: list of GsAccessControlList.Entry instances
  """

  class Entry(object):
    """Entry class corresponding to like named element.

    Instance data:
      permission: permission as string ('READ', 'WRITE', etc...)
      scope_type: scope type as string ('UserById', etc...)
      scope_user_id: scope user google storage id as string
      scope_user_name: scope user name as string
      scope_email: scope user email address as string
      scope_domain: scope domain as string
    """

    def __init__(self,
                 permission='',
                 scope_type='',
                 scope_user_id='',
                 scope_user_name='',
                 scope_email='',
                 scope_domain=''):
      """Entry Constructor.

      Args:
        permission: permission as string ('READ', 'WRITE', etc...)
        scope_type: scope type as string ('UserById', etc...)
        scope_user_id: scope user google storage id as string
        scope_user_name: scope user name as string
        scope_email: scope user email address as string
        scope_domain: scope domain as string
      """
      self.permission = permission
      self.scope_type = scope_type
      self.scope_user_id = scope_user_id
      self.scope_user_name = scope_user_name
      self.scope_email = scope_email
      self.scope_domain = scope_domain

  def __init__(self, owner_id='', owner_name=''):
    """GsAccessControlList Constructor.

    Args:
      owner_id: owner google storage id as string
      owner_name: owner name as string
    """
    self.owner_id = owner_id
    self.owner_name = owner_name
    self.entries = []

  def add_entry(self,
                permission='',
                scope_type='',
                scope_user_id='',
                scope_user_name='',
                scope_email='',
                scope_domain=''):
    """Adds an entry to the acl.

    Args:
      permission: permission as string ('READ', 'WRITE', etc...)
      scope_type: scope type as string ('UserById', etc...)
      scope_user_id: scope user google storage id as string
      scope_user_name: scope user name as string
      scope_email: scope user email address as string
      scope_domain: scope domain as string
    """
    self.entries.append(GsAccessControlList.Entry(
        permission=permission,
        scope_type=scope_type,
        scope_user_id=scope_user_id,
        scope_user_name=scope_user_name,
        scope_email=scope_email,
        scope_domain=scope_domain))

  def parse_xml(self, xml_str):
    """Parses the given xml string to this object.

    Any previously held owner/entry state is discarded first.

    Args:
      xml_str: AccessControlList XML as string
    """
    self.owner_id = ''
    self.owner_name = ''
    self.entries = []
    dom = xml.dom.minidom.parseString(xml_str)
    # If multiple Owner elements exist, the last one read wins.
    owner_elems = dom.getElementsByTagName('Owner')
    for owner_elem in owner_elems:
      self.owner_id = self.value_from_elems(
          owner_elem.getElementsByTagName('ID'))
      self.owner_name = self.value_from_elems(
          owner_elem.getElementsByTagName('Name'))
    entries_elems = dom.getElementsByTagName('Entries')
    for entries_elem in entries_elems:
      entry_elems = entries_elem.getElementsByTagName('Entry')
      for entry_elem in entry_elems:
        entry = GsAccessControlList.Entry()
        entry.permission = self.value_from_elems(
            entry_elem.getElementsByTagName('Permission'))
        scope_elems = entry_elem.getElementsByTagName('Scope')
        for scope_elem in scope_elems:
          # The scope type lives in an attribute; the rest are child nodes.
          entry.scope_type = scope_elem.getAttribute('type')
          entry.scope_user_id = self.value_from_elems(
              scope_elem.getElementsByTagName('ID'))
          entry.scope_user_name = self.value_from_elems(
              scope_elem.getElementsByTagName('Name'))
          entry.scope_email = self.value_from_elems(
              scope_elem.getElementsByTagName('EmailAddress'))
          entry.scope_domain = self.value_from_elems(
              scope_elem.getElementsByTagName('Domain'))
        self.entries.append(entry)

  def to_xml(self, pretty=False):
    """Translates this acl object to XML string.

    Empty fields are omitted from the output entirely.

    Args:
      pretty: if True, output will use dom.toprettyxml

    Returns:
      AccessControlList XML as string
    """
    impl = xml.dom.minidom.getDOMImplementation()
    dom = impl.createDocument(None, 'AccessControlList', None)
    top_elem = dom.documentElement
    if self.owner_id or self.owner_name:
      owner_elem = dom.createElement('Owner')
      if self.owner_id:
        self.add_text_node(dom, owner_elem, 'ID', self.owner_id)
      if self.owner_name:
        self.add_text_node(dom, owner_elem, 'Name', self.owner_name)
      top_elem.appendChild(owner_elem)
    if self.entries:
      entries_elem = dom.createElement('Entries')
      for entry in self.entries:
        entry_elem = dom.createElement('Entry')
        if entry.permission:
          self.add_text_node(dom,
                             entry_elem,
                             'Permission',
                             entry.permission)
        # A Scope element is emitted only if at least one scope field is set.
        if (entry.scope_type or
            entry.scope_user_id or
            entry.scope_user_name or
            entry.scope_email or
            entry.scope_domain):
          scope_elem = dom.createElement('Scope')
          if entry.scope_type:
            scope_elem.setAttribute('type', entry.scope_type)
          if entry.scope_user_id:
            self.add_text_node(dom,
                               scope_elem,
                               'ID',
                               entry.scope_user_id)
          if entry.scope_user_name:
            self.add_text_node(dom,
                               scope_elem,
                               'Name',
                               entry.scope_user_name)
          if entry.scope_email:
            self.add_text_node(dom,
                               scope_elem,
                               'EmailAddress',
                               entry.scope_email)
          if entry.scope_domain:
            self.add_text_node(dom,
                               scope_elem,
                               'Domain',
                               entry.scope_domain)
          entry_elem.appendChild(scope_elem)
        entries_elem.appendChild(entry_elem)
      top_elem.appendChild(entries_elem)
    if pretty:
      return dom.toprettyxml(indent='  ')
    return dom.toxml()
class GsListAllMyBucketsResult(GsXmlBase):
  """ListAllMyBucketsResult XML parser.

  See the Google Storage API documentation for more information about the
  ListAllMyBucketsResult XML specification.

  Instance data:
    owner_id: owner google storage id as string
    owner_display_name: owner name as string
    bucket_list: list of GsListAllMyBucketsResult.Bucket instances
  """

  class Bucket(object):
    """Bucket class corresponding to like named element.

    Instance data:
      name: bucket name as string
      creation_date: bucket creation date as string
    """

    def __init__(self):
      """Bucket constructor."""
      self.name = ''
      self.creation_date = ''

  def __init__(self):
    """GsListAllMyBucketsResult constructor."""
    self.owner_id = ''
    self.owner_display_name = ''
    self.bucket_list = []

  def parse_xml(self, xml_str):
    """Parses the given xml string to this object.

    Args:
      xml_str: ListAllMyBucketsResult XML as string
    """
    # Reset all parsed state so repeated parses do not accumulate buckets.
    self.owner_id = ''
    self.owner_display_name = ''
    self.bucket_list = []
    dom = xml.dom.minidom.parseString(xml_str)
    owner_elems = dom.getElementsByTagName('Owner')
    for owner_elem in owner_elems:
      self.owner_id = self.value_from_elems(
          owner_elem.getElementsByTagName('ID'))
      self.owner_display_name = self.value_from_elems(
          owner_elem.getElementsByTagName('DisplayName'))
    buckets_elems = dom.getElementsByTagName('Buckets')
    for buckets_elem in buckets_elems:
      bucket_elems = buckets_elem.getElementsByTagName('Bucket')
      for bucket_elem in bucket_elems:
        # One Bucket instance per <Bucket> element, in document order.
        bucket = GsListAllMyBucketsResult.Bucket()
        bucket.name = self.value_from_elems(
            bucket_elem.getElementsByTagName('Name'))
        bucket.creation_date = self.value_from_elems(
            bucket_elem.getElementsByTagName('CreationDate'))
        self.bucket_list.append(bucket)

  def get_bucket_names(self):
    """Returns the list of bucket names from self.bucket_list."""
    return [b.name for b in self.bucket_list]
class GsListBucketResult(GsXmlBase):
  """ListBucketResult XML parser.

  See the Google Storage API documentation for more information about the
  ListBucketResult XML specification.

  Instance data:
    name: bucket name as string
    prefix: prefix specified in request as string
    marker: marker specified in request as string
    is_truncated: "true" if the listing was truncated, i.e. NOT all objects
      in the bucket were returned (per the ListBucketResult specification)
    contents_list: list of GsListBucketResult.Contents instances
    common_prefixes: list of <CommonPrefixes>.<Prefix> names as strings
  """

  class Contents(object):
    """Contents class corresponding to like named element.

    Instance data:
      key: object name as string
      last_modified: time object last modified as string
      etag: object data etag value as string
      size: object size as string
      storage_class: object storage class as string
      owner_id: object owner google storage id as string
      owner_display_name: object owner name as string
    """

    def __init__(self):
      """Contents constructor."""
      self.key = ''
      self.last_modified = ''
      self.etag = ''
      self.size = ''
      self.storage_class = ''
      self.owner_id = ''
      self.owner_display_name = ''

  def __init__(self):
    """GsListBucketResult constructor."""
    self.name = ''
    self.prefix = ''
    self.marker = ''
    self.is_truncated = ''
    self.contents_list = []
    self.common_prefixes = []

  def parse_xml(self, xml_str):
    """Parses the given xml string to this object.

    Args:
      xml_str: ListBucketResult XML as string
    """
    # Only the list attributes need an explicit reset; the scalar fields
    # below are unconditionally overwritten by the parse.
    self.contents_list = []
    self.common_prefixes = []
    dom = xml.dom.minidom.parseString(xml_str)
    self.name = self.value_from_elems(dom.getElementsByTagName('Name'))
    self.prefix = self.value_from_elems(dom.getElementsByTagName('Prefix'))
    self.marker = self.value_from_elems(dom.getElementsByTagName('Marker'))
    self.is_truncated = self.value_from_elems(
        dom.getElementsByTagName('IsTruncated'))
    # One Contents instance per <Contents> element, in document order.
    contents_elems = dom.getElementsByTagName('Contents')
    for contents_elem in contents_elems:
      contents = GsListBucketResult.Contents()
      contents.key = self.value_from_elems(
          contents_elem.getElementsByTagName('Key'))
      contents.last_modified = self.value_from_elems(
          contents_elem.getElementsByTagName('LastModified'))
      contents.etag = self.value_from_elems(
          contents_elem.getElementsByTagName('ETag'))
      contents.size = self.value_from_elems(
          contents_elem.getElementsByTagName('Size'))
      contents.storage_class = self.value_from_elems(
          contents_elem.getElementsByTagName('StorageClass'))
      owner_elems = contents_elem.getElementsByTagName('Owner')
      for owner_elem in owner_elems:
        contents.owner_id = self.value_from_elems(
            owner_elem.getElementsByTagName('ID'))
        contents.owner_display_name = self.value_from_elems(
            owner_elem.getElementsByTagName('DisplayName'))
      self.contents_list.append(contents)
    # <CommonPrefixes> elements appear when the request used a delimiter.
    common_prefixes_elems = dom.getElementsByTagName('CommonPrefixes')
    for common_prefixes_elem in common_prefixes_elems:
      prefix_elems = common_prefixes_elem.getElementsByTagName('Prefix')
      for prefix_elem in prefix_elems:
        self.common_prefixes.append(prefix_elem.childNodes[0].nodeValue)

  def get_keys(self):
    """Returns the list of object names found in self.contents_list."""
    return [c.key for c in self.contents_list]
class GsOperation(object):
  """Record of one HTTP request/response exchange.

  Instance data:
    connection_host: host name connected to as string
    connection_port: host port connected to as int
    request_method: http request method ('GET', 'PUT', etc...) as string
    request_path_and_query: request URL path and query as string
    request_headers: request headers as dict
    response_status: response http status as int
    response_headers: response headers as dict
    response_error_body: response error body as string
  """

  def __init__(self):
    """Initialize all fields to their empty/default values."""
    self.connection_host = ''
    self.connection_port = 80
    self.request_method = ''
    self.request_path_and_query = ''
    self.request_headers = None
    self.response_status = 0
    self.response_headers = None
    self.response_error_body = None

  def __str__(self):
    """Render the request and response as a multi-line loggable string."""
    parts = ['REQUEST:']
    parts.append('\nSent to host: %s:%d' % (self.connection_host,
                                            self.connection_port))
    parts.append('\n%s %s' % (self.request_method,
                              self.request_path_and_query))
    if self.request_headers:
      for name, value in self.request_headers.iteritems():
        parts.append('\n%s: %s' % (name, value))
    parts.append('\nRESPONSE:')
    parts.append('\n%d' % self.response_status)
    if self.response_headers:
      for name, value in self.response_headers.iteritems():
        parts.append('\n%s: %s' % (name, value))
    if self.response_error_body:
      parts.append('\n')
      parts.append(self.response_error_body)
    return ''.join(parts)
class GsClient(object):
"""Google Storage client.
Instance data:
access_key: google storage access key as string for authentication
secret: google storage secret key as string for authentication
host: google storage host as string
proxy_host: optional proxy host
proxy_port: optional proxy port
auth_id: authentication type as string
max_retries: max num retries for retryable errors
max_redirects: max num redirects to follow
operations: list of GsOperation instances for most recent request
Note that each retry or redirection will append to this list.
backoff_exponent: current backoff exponent during failures
"""
def __init__(self,
access_key=None,
secret=None,
host='commondatastorage.googleapis.com',
proxy_host=None,
proxy_port=80,
auth_id='GOOG1',
max_retries=5,
max_redirects=10):
"""GsClient constructor.
Args:
access_key: google storage access key as string for authentication
secret: google storage secret key as string for authentication
host: google storage host as string
proxy_host: optional proxy host
proxy_port: optional proxy port
auth_id: authentication type as string
max_retries: max num retries for retryable errors
max_redirects: max num redirects to follow
"""
self.access_key = access_key
self.secret = secret
self.host = host
self.proxy_host = proxy_host
self.proxy_port = proxy_port
self.auth_id = auth_id
self.max_retries = max_retries
self.max_redirects = max_redirects
self.operations = []
self.backoff_exponent = -1
def get_service(self):
"""GET Service.
Returns:
GsListAllMyBucketsResult instance
"""
outfile = StringIO.StringIO()
self.send_request('GET', outfile=outfile)
result = GsListAllMyBucketsResult()
result.parse_xml(outfile.getvalue())
return result
def get_bucket(self,
bucket,
query_parameters=None):
"""GET Bucket.
Args:
bucket: bucket name as string
query_parameters: query parameters as dict
Returns:
GsListBucketResult instance
"""
outfile = StringIO.StringIO()
self.send_request('GET',
bucket=bucket,
outfile=outfile,
query_parameters=query_parameters)
result = GsListBucketResult()
result.parse_xml(outfile.getvalue())
return result
def get_bucket_acl(self,
bucket):
"""GET Bucket ACL.
Args:
bucket: bucket name as string
Returns:
GsAccessControlList instance
"""
outfile = StringIO.StringIO()
self.send_request('GET',
bucket=bucket,
outfile=outfile,
query_parameters={'acl': None})
acl = GsAccessControlList()
acl.parse_xml(outfile.getvalue())
return acl
def get_object(self,
bucket,
key,
outfile,
extra_headers=None,
query_parameters=None,
chunk_size=0):
"""GET Object.
Args:
bucket: bucket name as string
key: object name as string
outfile: an open file-like object
Only success responses will be written to this file.
Error resonses will be found in the operation objects
extra_headers: optional request headers as dict
query_parameters: optional query parameters as dict
chunk_size: size of each socket read (default of 0 = read all)
"""
self.send_request('GET',
bucket=bucket,
key=key,
outfile=outfile,
extra_headers=extra_headers,
query_parameters=query_parameters,
chunk_size=chunk_size,
success_status_codes=GET_OBJECT_SUCCESS_CODES)
def get_object_acl(self,
bucket,
key):
"""GET Object ACL.
Args:
bucket: bucket name as string
key: object name as string
Returns:
GsAccessControlList instance
"""
outfile = StringIO.StringIO()
self.send_request('GET',
bucket=bucket,
key=key,
outfile=outfile,
query_parameters={'acl': None})
acl = GsAccessControlList()
acl.parse_xml(outfile.getvalue())
return acl
def head_object(self,
bucket,
key,
extra_headers=None):
"""HEAD Object.
Args:
bucket: bucket name as string
key: object name as string
extra_headers: optional request headers as dict
Returns:
response headers as dict
"""
self.send_request('HEAD',
bucket=bucket,
key=key,
extra_headers=extra_headers)
return self.operations[-1].response_headers
def put_bucket(self,
bucket,
infile=None,
extra_headers=None,
query_parameters=None):
"""PUT Bucket.
Args:
bucket: bucket name as string
infile: an open file-like object
data in this file will be written to the http socket
extra_headers: optional request headers as dict
query_parameters: optional query parameters as dict
"""
self.send_request('PUT',
bucket=bucket,
infile=infile,
extra_headers=extra_headers,
query_parameters=query_parameters)
def put_bucket_acl(self,
bucket,
acl):
"""PUT Bucket ACL.
Args:
bucket: bucket name as string
acl: GsAccessControlList instance
"""
infile = StringIO.StringIO(acl.to_xml())
self.put_bucket(bucket,
infile=infile,
query_parameters={'acl': None})
def put_object(self,
bucket,
key,
infile,
extra_headers=None,
query_parameters=None,
chunk_size=0):
"""PUT Object.
Args:
bucket: bucket name as string
key: object name as string
infile: an open file-like object
data in this file will be written to the http socket
extra_headers: optional request headers as dict
query_parameters: optional query parameters as dict
chunk_size: size of each socket write (default of 0 = write all)
"""
self.send_request('PUT',
bucket=bucket,
key=key,
infile=infile,
extra_headers=extra_headers,
query_parameters=query_parameters,
chunk_size=chunk_size)
def put_object_acl(self,
bucket,
key,
acl):
"""PUT Object ACL.
Args:
bucket: bucket name as string
key: object name as string
acl: GsAccessControlList instance
"""
infile = StringIO.StringIO(acl.to_xml())
self.put_object(bucket,
key,
infile,
query_parameters={'acl': None})
def delete_bucket(self,
bucket):
"""DELETE Bucket.
Args:
bucket: bucket name as string
"""
self.send_request(
'DELETE',
bucket=bucket,
success_status_codes=DEL_BUCKET_SUCCESS_CODES,
retryable_status_codes=DEL_BUCKET_RETRYABLE_CODES)
def delete_object(self,
bucket,
key):
"""DELETE Object.
Args:
bucket: bucket name as string
key: object name as string
"""
self.send_request('DELETE',
bucket=bucket,
key=key,
success_status_codes=DEL_OBJECT_SUCCESS_CODES)
def send_request(self,
http_method,
bucket=None,
key=None,
infile=None,
outfile=None,
extra_headers=None,
query_parameters=None,
chunk_size=0,
success_status_codes=DEFAULT_SUCCESS_CODES,
retryable_status_codes=DEFAULT_RETRYABLE_CODES):
"""Sends the specifed request.
Retries and follows redirection as necessary.
Args:
http_method: http method as string ('GET', 'PUT', etc...)
bucket: bucket name as string
key: object name as string
infile: an open file-like object
data in this file will be written to the http socket
outfile: an open file-like object
Only success responses will be written to this file.
Error resonses will be found in the operation objects
extra_headers: optional request headers as dict
query_parameters: optional query parameters as dict
chunk_size: size of each socket read/write (default of 0 = all)
success_status_codes: response status codes considered success
retryable_status_codes: response status codes considered retryable
Returns:
self.operations: the list of operations executed for this request.
"""
self.operations = []
operation = None
redirect_location = None
retries = 0
redirects = 0
while retries <= self.max_retries and redirects <= self.max_redirects:
# Need backoff sleep?
if self.backoff_exponent >= 0:
self._backoff_sleep()
# Prepare operation
if redirect_location:
operation = self._create_redirect_operation(
operation, redirect_location)
redirect_location = None
else:
operation = self._create_init_operation(
http_method,
bucket=bucket,
key=key,
extra_headers=extra_headers,
query_parameters=query_parameters,
infile=infile)
# Execute operation
try:
operation = self._exec_operation(
operation,
infile=infile,
outfile=outfile,
chunk_size=chunk_size,
success_status_codes=success_status_codes)
except httplib.IncompleteRead, e:
operation.response_error_body = (
'IncompleteRead: %d bytes read' % (e.partial))
retries += 1
self._backoff_increment()
continue
finally:
self.operations.append(operation)
# Check for success
if operation.response_status in success_status_codes:
self._backoff_decrement()
return self.operations
# Check for redirect
elif operation.response_status in REDIRECT_CODES:
self._backoff_decrement()
redirect_location = operation.response_headers['location']
redirects += 1
logging.debug('Redirected to %s', redirect_location)
continue
# Check for retryable failures
elif operation.response_status in retryable_status_codes:
self._backoff_increment()
retries += 1
continue
else:
self._backoff_increment()
break
raise GsError('Service Failure', self.operations)
def _exec_operation(self,
operation,
infile=None,
outfile=None,
chunk_size=0,
success_status_codes=DEFAULT_SUCCESS_CODES):
"""Executes given operation request, and populates response."""
connection = None
try:
logging.debug('%s %s %s',
operation.request_method,
operation.request_path_and_query,
str(operation.request_headers))
# Connect
connection = self._connect(operation.connection_host,
operation.connection_port)
# Write the first line of the request
self._put_request(connection,
operation.request_method,
operation.request_path_and_query)
# Write the headers
self._put_headers(connection,
operation.request_headers)
# Write the data
if infile:
self._write(connection, infile, chunk_size)
else:
# Flush the header write with no body
connection.send('')
# Get the response
response = connection.getresponse()
# Get the status
operation.response_status = response.status
# Read the response headers
operation.response_headers = {}
operation.response_headers.update(response.getheaders())
# Read the response data (not for HEAD)
if operation.request_method != 'HEAD':
# Don't put data in outfile unless success status
if operation.response_status in success_status_codes:
if outfile:
self._read(response, outfile, chunk_size)
# Read the error body
else:
operation.response_error_body = response.read()
finally:
if connection:
self._close(connection)
return operation
def _create_init_operation(self,
http_method,
bucket=None,
key=None,
extra_headers=None,
query_parameters=None,
infile=None):
"""Inits a new operation with request fields."""
op = GsOperation()
if self.proxy_host:
op.connection_host = self.proxy_host
op.connection_port = self.proxy_port
else:
op.connection_host = self.host
op.connection_port = 80
op.request_method = http_method
path = self._get_path(bucket, key)
query_string = self._get_query_string(query_parameters)
op.request_path_and_query = path + query_string
op.request_headers = self._get_request_headers(
http_method,
path,
query_parameters,
extra_headers,
infile)
return op
def _create_redirect_operation(self,
previous_operation,
location):
"""Creates a new op based on the last op and the redirection."""
parts = urlparse.urlparse(location)
op = GsOperation()
if self.proxy_host:
op.connection_host = self.proxy_host
op.connection_port = self.proxy_port
else:
host_and_port = parts.netloc.split(':')
op.connection_host = host_and_port[0]
if len(host_and_port) > 1:
op.connection_port = int(host_and_port[1])
else:
op.connection_port = 80
op.request_method = previous_operation.request_method
op.request_path_and_query = parts.path
if parts.query:
op.request_path_and_query += '?%s' % parts.query
op.request_headers = previous_operation.request_headers.copy()
op.request_headers['Host'] = parts.netloc # host and optional port
return op
def _backoff_decrement(self):
"""Decrements the backoff exponent toward min of -1 (off)."""
if self.backoff_exponent > -1:
self.backoff_exponent -= 1
def _backoff_increment(self):
"""Increments the backoff exponent toward max of 5."""
if self.backoff_exponent < 5:
self.backoff_exponent += 1
def _backoff_sleep(self):
"""Backoff sleep function called between retry attempts.
See Google Storage docs for required exponential backoff
when errors occur.
Override this if you want it to do more.
"""
sleep_sec = 1 << self.backoff_exponent
logging.debug('Backoff sleep, retrying in %d seconds...', sleep_sec)
time.sleep(sleep_sec)
def _connect(self, host, port):
"""Returns a connection object.
Override this if you have an alternate connection implementation.
"""
return httplib.HTTPConnection(host, port=port)
def _close(self, connection):
"""Closes the connection.
Override this if you want it to do more.
"""
connection.close()
def _put_request(self,
connection,
http_method,
path_and_query):
"""Sends the method, path, and query to the connection.
Override this if you want it to do more.
"""
connection.putrequest(http_method,
path_and_query,
skip_host=True,
skip_accept_encoding=True)
def _put_headers(self,
connection,
headers):
"""Sends the request headers to the connection.
Override this if you want it to do more.
"""
for name, val in headers.iteritems():
connection.putheader(name, val)
connection.endheaders()
def _write(self, connection, infile, chunk_size):
"""Writes data in infile to the open connection.
Override this if you want it to do more.
Perhaps for performance measuring or periodic callbacks.
"""
infile.seek(0)
if chunk_size > 0:
while True:
chunk = infile.read(chunk_size)
if chunk:
connection.send(chunk)
else:
break
else:
connection.send(infile.read())
def _read(self, response, outfile, chunk_size):
"""Reads data from response, and writes it to outfile.
Override this if you want it to do more.
Perhaps for performance measuring or periodic callbacks.
"""
if chunk_size > 0:
while True:
chunk = response.read(chunk_size)
if chunk:
outfile.write(chunk)
else:
break
else:
outfile.write(response.read())
outfile.flush()
def _get_request_headers(self,
http_method,
path,
query_parameters,
extra_headers,
infile):
"""Returns the request header dict based on args."""
headers = {}
# Content-Length
if infile:
infile.seek(0, os.SEEK_END)
headers['Content-Length'] = infile.tell()
else:
headers['Content-Length'] = '0'
# Date
headers['Date'] = time.strftime('%a, %d %b %Y %H:%M:%S GMT',
time.gmtime())
# Host
headers['Host'] = self.host
# User-Agent
headers['User-Agent'] = 'gslite/' + __version__
# Add extra headers
if extra_headers:
headers.update(extra_headers)
# Authorization
if self.access_key and self.secret:
headers['Authorization'] = self._get_authentication(
http_method,
path,
query_parameters,
headers)
return headers
def _get_path(self, bucket, key):
"""Returns the URL path based on args."""
s = StringIO.StringIO()
s.write('/')
if bucket:
s.write(urllib.quote(bucket))
if key:
s.write('/')
s.write(urllib.quote(key))
return s.getvalue()
def _get_query_string(self, query_parameters):
"""Returns the URL query string based on query dict."""
s = StringIO.StringIO()
if query_parameters:
s.write('?')
first = True
for name, val in query_parameters.iteritems():
if first:
first = False
else:
s.write('&')
s.write(name)
if val:
s.write('=%s' % urllib.quote(str(val)))
return s.getvalue()
def _get_authentication(self, http_method, path, query_parameters, headers):
"""Returns the Authorization header value based on args."""
string_to_sign = StringIO.StringIO()
# HTTP method
string_to_sign.write('%s\n' % http_method)
# Content-Md5
if 'Content-MD5' in headers:
string_to_sign.write(headers['Content-MD5'].strip())
string_to_sign.write('\n')
# Content-Type
if 'Content-Type' in headers:
string_to_sign.write(headers['Content-Type'].strip())
string_to_sign.write('\n')
# Date
if ('x-goog-date' not in headers and
'Date' in headers):
string_to_sign.write(headers['Date'])
string_to_sign.write('\n')
# Extension headers
sorted_header_keys = headers.keys()
sorted_header_keys.sort()
for header_key in sorted_header_keys:
if header_key.startswith('x-goog-'):
string_to_sign.write('%s:%s\n' % (
header_key, headers[header_key]))
# Resource
string_to_sign.write(path)
if query_parameters:
for subresource in ('acl', 'location', 'logging', 'torrent'):
if subresource in query_parameters:
string_to_sign.write('?%s' % subresource)
# should only be one of these
break
# HMAC-SHA1
h = hmac.new(self.secret, digestmod=hashlib.sha1)
h.update(string_to_sign.getvalue())
signature = base64.b64encode(h.digest())
# Put it all together
return '%s %s:%s' % (self.auth_id, self.access_key, signature)
| Python |
#!/usr/bin/env python
#
# Copyright 2011 Google Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""User settings page for the application."""
import os
import re
from apiclient.discovery import build
from oauth2client import client
from google.appengine.api import users
from google.appengine.ext import webapp
from google.appengine.ext.webapp import template
from google.appengine.ext.webapp.util import login_required
import httplib2
import model
import settings
class UserSettingsPage(webapp.RequestHandler):
  """RequestHandler for the Admin page."""

  @login_required
  def get(self):
    """Display the admin page template.

    Builds the template context: current user, admin flag, local-server
    flag, the list of suggestion models, and each model's Prediction API
    training status (when stored credentials are available).
    """
    template_value = {}
    user = users.get_current_user()
    template_value['current_user'] = user.email()
    if users.is_current_user_admin():
      # Admins also see which account the stored credentials belong to.
      credentials = model.Credentials.get_by_key_name(
          settings.CREDENTIALS_KEYNAME)
      if credentials:
        template_value['credentials_email'] = credentials.email
      template_value['is_admin'] = True
    else:
      template_value['is_admin'] = False
    # Determine whether or not the server is running locally, and offer a
    # datastore reset if it's not.
    if re.search('(appspot)', self.request.host):
      template_value['is_local'] = False
    else:
      template_value['is_local'] = True
    # Make a list of tags from the datastore to pass to template.
    suggestion_models = model.SuggestionModel.all()
    suggestion_models.order('__key__')
    template_value['models'] = suggestion_models
    # Fetched again, independently of the admin branch above, because the
    # training status is shown regardless of admin rights.
    credentials = model.Credentials.get_by_key_name(
        settings.CREDENTIALS_KEYNAME)
    status = {}
    if credentials is not None:
      credentials = credentials.credentials
      http = httplib2.Http()
      http = credentials.authorize(http)
      service = build('prediction', 'v1.4', http=http)
      try:
        train = service.trainedmodels()
        for suggestion_model in suggestion_models:
          state = train.get(id=suggestion_model.training_file).execute()
          status[suggestion_model.name] = state['trainingStatus']
      except client.AccessTokenRefreshError:
        # Stored token could not be refreshed; prompt for new credentials.
        status['Failed to retrieve training data'] = 'Refresh credentials'
    else:
      status['Add Credentials to access models'] = '...'
    template_value['status'] = status
    path = os.path.join(settings.TEMPLATE_BASE_PATH, 'user_settings.html')
    self.response.out.write(template.render(path, template_value))
| Python |
#!/usr/bin/env python
#
# Copyright 2011 Google Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Authorization grant page for the application."""
import logging
import os
import pickle
from oauth2client.appengine import StorageByKeyName
from oauth2client.client import OAuth2WebServerFlow
from google.appengine.api import memcache
from google.appengine.api import users
from google.appengine.ext import webapp
from google.appengine.ext.webapp import template
from google.appengine.ext.webapp.util import login_required
from google.appengine.ext.webapp.util import run_wsgi_app
import model
import settings
"""
Maps OAuth API parameter with API scope.
The current supported values are:
{
'<api_name>': {
'admin_required': Whether or not this API is "admin-only".
'scopes': The requested Google API Scopes.
'model': Datastore model used to store the credentials.
'credentials_attribute': Datastore model attribute used to store the
credentials.
    'key_name': Key name to use if only one instance of this model has to be
                stored at a time. Optional, this value defaults to the
                current user ID.
  },
  ...
}
"""
# Per-API OAuth grant configuration; consumed by OAuthGrantPage and
# OAuthCallbackPage below.
SCOPES = {
    'prediction': {
        'admin_required': True,
        'scopes': ['https://www.googleapis.com/auth/prediction'],
        'model': model.Credentials,
        'credentials_attribute': 'credentials',
        # One shared credential for the whole application.
        'key_name': settings.CREDENTIALS_KEYNAME
    },
    'tasks': {
        'admin_required': False,
        'scopes': ['https://www.googleapis.com/auth/tasks'],
        'model': model.UserSettings,
        # No 'key_name': tasks credentials are stored per-user.
        'credentials_attribute': 'tasks_credentials'
    }
}
class OAuthGrantPage(webapp.RequestHandler):
  """RequestHandler for the authorization grant page."""

  @login_required
  def get(self, api):
    """Handle the GET request for the OAuth grant page.

    Construct the authorization grant URL and redirect the user to it.

    Args:
      api: Private API name to ask access for (should be a key of SCOPES).
    """
    if (api not in SCOPES or
        SCOPES[api]['admin_required'] and not users.is_current_user_admin()):
      # Bug fix: webapp.RequestHandler has no status() method -- the old
      # self.status(400) raised AttributeError. error() sets the HTTP
      # status code on the response.
      self.error(400)
    else:
      user = users.get_current_user()
      logging.info('%s (%s) has entered OAuth 2.0 grant flow',
                   user.email(), user.user_id())
      flow = OAuth2WebServerFlow(client_id=settings.CLIENT_ID,
                                 client_secret=settings.CLIENT_SECRET,
                                 scope=' '.join(SCOPES[api]['scopes']),
                                 user_agent=settings.USER_AGENT,
                                 domain=settings.DOMAIN,
                                 state=api, access_type='offline')
      callback = self.request.host_url + '/oauth2callback'
      authorize_url = flow.step1_get_authorize_url(callback)
      # Persist the flow so OAuthCallbackPage can finish the exchange.
      memcache.set(user.user_id() + api, pickle.dumps(flow))
      self.redirect(authorize_url)
class OAuthCallbackPage(webapp.RequestHandler):
  """RequestHandler for the authorization callback page."""

  @login_required
  def get(self):
    """Handle the GET request for the OAuth callback page.

    Get the stored user's credentials flow and request the access token to
    finish the OAuth 2.0 dance.
    If successful, the user's OAuth 2.0 credentials are stored in the
    datastore.
    """
    user = users.get_current_user()
    error = self.request.get('error')
    # 'state' carries the API name that OAuthGrantPage put in the flow.
    api = self.request.params.get('state')
    if (api not in SCOPES or
        SCOPES[api]['admin_required'] and not users.is_current_user_admin()):
      # Bug fix: webapp.RequestHandler has no status() method -- the old
      # self.status(404) raised AttributeError. error() sets the HTTP
      # status code on the response.
      self.error(404)
    elif error and error == 'access_denied':
      logging.warning('%s (%s) has denied access to the APIs',
                      user.email(), user.user_id())
    else:
      pickled_flow = memcache.get(user.user_id() + api)
      if pickled_flow:
        flow = pickle.loads(pickled_flow)
        credentials = flow.step2_exchange(self.request.params)
        StorageByKeyName(
            SCOPES[api]['model'], SCOPES[api].get('key_name') or user.email(),
            SCOPES[api]['credentials_attribute']).put(credentials)
        if SCOPES[api].get('key_name'):
          # Add the email to the datastore Credentials entry.
          credentials = model.Credentials.get_by_key_name(
              settings.CREDENTIALS_KEYNAME)
          credentials.email = user.email()
          credentials.put()
        logging.info('Successfully stored OAuth 2.0 credentials for: %s (%s)',
                     user.email(), user.user_id())
      else:
        logging.warning('Unknown flow for user: %s (%s)',
                        user.email(), user.user_id())
      self.redirect('/')
    # NOTE(review): this render also runs after the redirect above; the
    # body is ignored on redirect responses. Preserved as-is.
    path = os.path.join(settings.TEMPLATE_BASE_PATH, 'oauth.html')
    self.response.out.write(template.render(path, {}))
# URL routing: the grant page captures the API name from the path.
application = webapp.WSGIApplication(
    [
        ('/oauth/(.*)', OAuthGrantPage),
        ('/oauth2callback', OAuthCallbackPage),
    ],
    debug=True)


def main():
  """Runs the application."""
  run_wsgi_app(application)


if __name__ == '__main__':
  main()
| Python |
#!/usr/bin/env python
#
# Copyright 2011 Google Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Landing page for the application."""
import os
from google.appengine.api import users
from google.appengine.ext import webapp
from google.appengine.ext.webapp import template
from api.handler import INCIDENT_FILTERS
from settings import TEMPLATE_BASE_PATH
class LandingPage(webapp.RequestHandler):
  """Landing page handler."""

  def get(self):
    """Render the landing page, or send anonymous visitors to login."""
    user = users.get_current_user()
    if not user:
      # Not signed in: bounce through the login flow and back here.
      self.redirect(users.create_login_url(self.request.uri))
      return
    template_values = {
        'owner': user.email(),
        # Retrieve the list of filters to add as autocomplete params.
        'filters': [x[0] for x in INCIDENT_FILTERS],
    }
    path = os.path.join(TEMPLATE_BASE_PATH, 'page.html')
    self.response.out.write(template.render(path, template_values))
| Python |
#!/usr/bin/env python
"""ISO 8601 date time string parsing
Basic usage:
>>> import iso8601
>>> iso8601.parse_date("2007-01-25T12:00:00Z")
datetime.datetime(2007, 1, 25, 12, 0, tzinfo=<iso8601.iso8601.Utc ...>)
>>>
"""
from datetime import datetime, timedelta, tzinfo
import re
__all__ = ["parse_date", "ParseError"]
# Adapted from http://delete.me.uk/2005/03/iso8601.html
# Matches a full ISO 8601 timestamp. Month, day, time, fractional seconds
# and timezone are nested optional groups, so a bare "YYYY" also matches
# (the missing groups come back as None from groupdict()).
ISO8601_REGEX = re.compile(r"(?P<year>[0-9]{4})(-(?P<month>[0-9]{1,2})(-(?P<day>[0-9]{1,2})"
    r"((?P<separator>.)(?P<hour>[0-9]{2}):(?P<minute>[0-9]{2})(:(?P<second>[0-9]{2})(\.(?P<fraction>[0-9]+))?)?"
    r"(?P<timezone>Z|(([-+])([0-9]{2}):([0-9]{2})))?)?)?)?"
)
# Matches a numeric UTC offset such as "+05:30"; the "." accepts any
# single separator character between hours and minutes.
TIMEZONE_REGEX = re.compile("(?P<prefix>[+-])(?P<hours>[0-9]{2}).(?P<minutes>[0-9]{2})")
class ParseError(Exception):
    """Raised when there is a problem parsing a date string"""
# Yoinked from python docs
ZERO = timedelta(0)  # shared zero offset used by the tzinfo classes below
class Utc(tzinfo):
    """The UTC timezone: zero offset from UTC, no daylight saving."""

    def utcoffset(self, dt):
        return ZERO

    def dst(self, dt):
        # UTC never observes daylight saving time.
        return ZERO

    def tzname(self, dt):
        return "UTC"


# Shared module-level singleton; tzinfo instances are stateless.
UTC = Utc()
class FixedOffset(tzinfo):
    """Fixed offset in hours and minutes from UTC."""

    def __init__(self, offset_hours, offset_minutes, name):
        self.__name = name
        self.__delta = timedelta(hours=offset_hours, minutes=offset_minutes)

    def utcoffset(self, dt):
        return self.__delta

    def tzname(self, dt):
        return self.__name

    def dst(self, dt):
        # A fixed offset never shifts for daylight saving.
        return ZERO

    def __repr__(self):
        return "<FixedOffset %r>" % self.__name
def parse_timezone(tzstring, default_timezone=UTC):
    """Parses an ISO 8601 time zone designator into a tzinfo offset.

    "Z" and a missing designator both map to default_timezone. Treating a
    missing zone as the default is not strictly correct, but dates without
    timezones are common in practice (addresses issue 4).
    """
    if tzstring == "Z" or tzstring is None:
        return default_timezone
    match = TIMEZONE_REGEX.match(tzstring)
    sign, hour_text, minute_text = match.groups()
    hours = int(hour_text)
    minutes = int(minute_text)
    if sign == "-":
        # Negate both components so the whole offset lies west of UTC.
        hours, minutes = -hours, -minutes
    return FixedOffset(hours, minutes, tzstring)
def parse_date(datestring, default_timezone=UTC):
    """Parses ISO 8601 dates into datetime objects.

    The timezone is parsed from the date string. Dates lacking a timezone
    (not strictly valid ISO 8601) fall back to default_timezone, which is
    UTC unless the caller overrides it.
    """
    if not isinstance(datestring, basestring):
        raise ParseError("Expecting a string %r" % datestring)
    match = ISO8601_REGEX.match(datestring)
    if match is None:
        raise ParseError("Unable to parse date string %r" % datestring)
    parts = match.groupdict()
    tz = parse_timezone(parts["timezone"], default_timezone=default_timezone)
    fraction = parts["fraction"]
    # Convert the fractional-seconds digits into integer microseconds.
    if fraction is None:
        microseconds = 0
    else:
        microseconds = int(float("0.%s" % fraction) * 1e6)
    return datetime(
        int(parts["year"]), int(parts["month"]), int(parts["day"]),
        int(parts["hour"]), int(parts["minute"]), int(parts["second"]),
        microseconds, tz)
| Python |
#!/usr/bin/env python
| Python |
#!/usr/bin/env python
#
# Copyright 2011 Google Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Aggregates individual email messages into a single incident."""
import logging
from time import strftime
from google.appengine.ext import webapp
from google.appengine.ext.webapp import util
from google.appengine.ext.webapp.mail_handlers import InboundMailHandler
import model
class MailAggregator(InboundMailHandler):
"""Handles incoming mail where each message is delivered individually."""
# Whether or not to save the message body on initial save.
SAVE_FULL_TEXT = True
FAKE_MESSAGE_ID = 'FAKEMESSAGEID'
# Format suitable for strftime
FAKE_MESSAGE_ID_SUFFIX_FORMAT = '%Y%m%d%H%M%S'
def receive(self, mail):
"""Handles receipt of an email message.
Args:
mail: Incoming message to parse.
"""
# Check for delivery dupes, first.
message_id = mail.original.get('Message-ID')
if message_id is None:
message_id = MailAggregator.FAKE_MESSAGE_ID + strftime(
MailAggregator.FAKE_MESSAGE_ID_SUFFIX_FORMAT)
message = model.Message.gql('WHERE message_id = :1', message_id).get()
# If there isn't already a copy, save the email.
if not message:
message = model.Message.FromMail(mail, message_id,
MailAggregator.SAVE_FULL_TEXT)
# Incident association is idempotent and can be repeated.
message.AssociateMailIncident()
def main():
logging.getLogger().setLevel(logging.DEBUG)
application = webapp.WSGIApplication([MailAggregator.mapping()], debug=True)
util.run_wsgi_app(application)
if __name__ == '__main__':
main()
| Python |
#!/usr/bin/env python
#
# Copyright 2011 Google Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provides models for Au-to-do data types.
This module provides data types for Au-to-do data stored in the App Engine
datastore.
"""
from datetime import datetime
from datetime import timedelta
import email.utils
import logging
import os
import re
from sets import Set
import urllib
from oauth2client.appengine import CredentialsProperty
from google.appengine.api import memcache
from google.appengine.api import taskqueue
from google.appengine.ext import db
import simplejson
import settings
INCIDENT_DEEP_LINK = 'http://%s/#id=' % os.environ.get('HTTP_HOST', 'localhost')
class Incident(db.Model):
  """Describes an incident.

  Incidents are support inquiries of one of several types. Examples include:
  a thread from a mailing list (or Google Group), a Unify ticket, or a Google
  Code Project Hosting issue.

  Attributes:
    title: Title of the incident.
    created: When the incident started (or was first tracked).
    updated: When the incident was last updated.
    resolved: When the incident was resolved.
    status: Current status of the incident (eg. waiting for customer response).
    owner: Owner of the incident.
    author: Person who created the incident.
    mailing_list: Mailing list to which the incident was sent (if from a
        mailing list).
    canonical_link: Reference to the canonical location of the incident, e.g.
        the Google Group page or the Unify ticket.
    suggested_tags: List of tags suggested by the Prediction API/suggester.
    accepted_tags: List of tags approved or added by the user.
    trained_tags: List of accepted_tags that generated training
        examples. Used to detect changes in accepted_tags by the user.
    trained_date: Date when this Incident was last processed for training data.
    training_review: True when the training algorithm should check
        this Incident for new Accepted tags or other changes related
        to the Prediction API.
  """
  title = db.StringProperty(multiline=True)
  created = db.DateTimeProperty()
  updated = db.DateTimeProperty()
  resolved = db.DateTimeProperty()
  status = db.StringProperty(default='new')
  owner = db.StringProperty(default='none')
  author = db.StringProperty()
  mailing_list = db.StringProperty()
  canonical_link = db.StringProperty()
  suggested_tags = db.ListProperty(str)
  accepted_tags = db.ListProperty(str)
  trained_tags = db.ListProperty(str)
  trained_date = db.DateTimeProperty()
  training_review = db.BooleanProperty(default=True)
  # Format used by the class for parsing dates.
  ISO_FORMAT = '%Y-%m-%dT%H:%M:%S'

  @staticmethod
  def MergeWithParent(parent_incident, children):
    """Merges multiple child incidents into one incident with a common parent.

    When child messages are delivered prior to their parent message (eg.
    In-Reply-To references a message that has not yet been delivered), the
    children will be roots of new incidents. Once the parent message is
    delivered, this method will remove the child incidents and place them
    under the common parent.

    Records the current time in parent_incident.updated to help with
    debounce of user input. Training may choose to ignore Incidents
    which have been modified too recently because they are possibly
    still under user review and editing.

    Sets parent_incident.training_review to True, assuming that the
    MergeWithParent might have affected the tags.

    Args:
      parent_incident: Parent incident, referenced by each of the children.
      children: One or more messages referencing defunct parents.
    """
    incidents_to_merge = Set()
    for child in children:
      incidents_to_merge.add(child.incident)
      child.incident = parent_incident
      child.put()
    parent_incident.PurgeJsonCache()
    parent_incident.updated = datetime.utcnow()
    parent_incident.training_review = True
    for incident in incidents_to_merge:
      parent_incident.accepted_tags.extend(incident.accepted_tags)
      parent_incident.suggested_tags.extend(incident.suggested_tags)
      parent_incident.trained_tags.extend(incident.trained_tags)
      # BUG FIX: keep the later of the parent's and the child's training
      # dates. The original compared incident.trained_date with itself,
      # unconditionally clobbering the parent's date.
      parent_incident.trained_date = max([parent_incident.trained_date,
                                          incident.trained_date])
      messages = Message.gql('WHERE incident = :1', incident.key())
      for message in messages:
        message.incident = parent_incident
        message.put()
      if not parent_incident.key() == incident.key():
        incident.delete()
    parent_incident.accepted_tags = list(set(parent_incident.accepted_tags))
    parent_incident.suggested_tags = list(set(parent_incident.suggested_tags))
    # BUG FIX: dedupe trained_tags from trained_tags. The original copied
    # suggested_tags here, wrongly marking mere suggestions as trained.
    parent_incident.trained_tags = list(set(parent_incident.trained_tags))
    parent_incident.put()

  def Overlay(self, other):
    """Overwrite this incident's fields with other incident's fields.

    Records current time as time of update (incident.updated).
    Does not overwrite messages.

    Args:
      other: Incident from which to pull values.
    """
    self.title = other.title
    self.owner = other.owner
    self.status = other.status
    self.created = other.created
    self.updated = datetime.utcnow()
    self.training_review = other.training_review
    self.resolved = other.resolved
    self.suggested_tags = other.suggested_tags
    self.accepted_tags = other.accepted_tags
    self.trained_tags = other.trained_tags
    self.trained_date = other.trained_date
    self.canonical_link = other.canonical_link

  def GetDict(self):
    """Return a dict representation of this incident, with messages.

    This will return a copy from memcache if it exists (and caching is
    enabled), and update the cache if not present.

    Returns:
      Dict representing the incident.
    """
    # Check for memcached copy first.
    key = self.GetJsonModelKey()
    cached = memcache.get(key)
    if cached and settings.USE_MEMCACHE_FOR_JSON_MODELS:
      return cached
    model = {
        'title': self.title,
        'owner': self.owner,
        'status': self.status,
        'created': self.created,
        'updated': self.updated,
        'resolved': self.resolved,
        'suggested_tags': self.suggested_tags,
        'accepted_tags': self.accepted_tags,
        'trained_tags': self.trained_tags,
        'trained_date': self.trained_date,
        'training_review': self.training_review,
        'canonical_link': self.canonical_link,
        'messages': []}
    if self.key():
      model['id'] = self.key().id()
    if self.message_set is not None:
      for m in self.message_set:
        model['messages'].append(m.GetDict())
    # The cache is refreshed even when reads are disabled, keeping it warm.
    memcache.set(key, model)
    return model

  def PurgeJsonCache(self):
    """Purges the cached JSON representation of the incident."""
    key = self.GetJsonModelKey()
    logging.info('Purging cache for incident:' + key)
    memcache.delete(key)

  def GetJsonModelKey(self):
    """Returns the key pointing to the instance's JSON representation.

    Returns:
      JSON model key.
    """
    return settings.MEMCACHE_VERSION_PREFIX + str(self.key().id())

  def ToTaskDict(self, body=None):
    """Parse an incident into a Tasks API dictionary.

    Args:
      body: Optional dictionary to update.

    Returns:
      Dictionary representing the incident.
    """
    body = body or {}
    body['title'] = self.title
    body['notes'] = self.GetDeepLink()
    if self.resolved:
      body['status'] = 'completed'
      body['completed'] = self.GetDateTime(self.resolved)
    else:
      body['status'] = 'needsAction'
      if 'completed' in body:
        body.pop('completed')
    return body

  def GetDeepLink(self):
    """Return a deeplink to the incident.

    Returns:
      Deeplink to the incident.
    """
    return '%s%s' % (INCIDENT_DEEP_LINK, self.key().id())

  @staticmethod
  def FromJson(json):
    """Convert the given JSON representation to an Incident.

    Sets 'incident.training_review' to True, assuming that anything
    could have changed on the client which sent the JSON, including
    the Tags.

    Does not include messages from given JSON, as messages are read-only.

    Args:
      json: JSON representation to convert.

    Returns:
      Incident with all the properties of the given JSON representation.
    """
    retval = simplejson.loads(json)
    incident = Incident(
        title=retval.get('title'),
        owner=retval.get('owner'),
        status=retval.get('status'),
        suggested_tags=retval.get('suggested_tags'),
        accepted_tags=retval.get('accepted_tags'),
        trained_tags=retval.get('trained_tags'),
        training_review=True,
        canonical_link=retval.get('canonical_link'))
    if retval.get('created') is not None:
      incident.created = Incident.ParseDate(retval.get('created'))
    if retval.get('updated') is not None:
      incident.updated = Incident.ParseDate(retval.get('updated'))
    if retval.get('resolved') is not None:
      incident.resolved = Incident.ParseDate(retval.get('resolved'))
    if retval.get('trained_date') is not None:
      incident.trained_date = Incident.ParseDate(retval.get('trained_date'))
    return incident

  @staticmethod
  def ParseDate(date_string):
    """Converts an ISO-formatted string into a native datetime.

    Args:
      date_string: ISO-formatted date string.

    Returns:
      Native datetime object.
    """
    if '.' in date_string:
      (dt, fraction) = date_string.split('.', 1)
      # BUG FIX: interpret the fractional field as a fraction of a second.
      # The original truncated it to three digits and misread them as whole
      # microseconds (e.g. '.500' parsed as 500us instead of 500000us).
      microsecs = int(float('0.%s' % fraction) * 1e6)
    else:
      dt = date_string
      microsecs = 0
    return_datetime = datetime.strptime(dt, Incident.ISO_FORMAT)
    return_datetime += timedelta(microseconds=int(microsecs))
    return return_datetime

  @staticmethod
  def GetDateTime(time):
    """Convert a datetime.datetime object to a Tasks API compatible string.

    Args:
      time: datetime.datetime to convert.

    Returns:
      String representing the datetime.datetime object.
    """
    date_str = time.isoformat()
    if len(date_str.split('.')) == 1:
      # isoformat() omits the fraction when microseconds are zero; the
      # Tasks API expects one, so append an explicit zero fraction.
      date_str += '.000'
    return date_str + 'Z'
class Message(db.Model):
  """Describes a message belonging to an incident.

  Attributes:
    message_id: RFC822 message ID. Populated when the message is an email.
    in_reply_to: RFC822 message ID that the message references most recently.
    references: Series of RFC822 message IDs, in reverse chronological order,
        that are referenced by the message (stored comma-joined).
    incident: Incident that the message belongs to.
    title: Title of the message.
    author: Author of the message.
    body: Body of the message in plaintext.
    sent: When the message was sent.
    mailing_list: Mailing list to which the message was sent (if the message
        is an email).
    canonical_link: Reference to the canonical location of the message, e.g.
        the Google Group page or the Unify message.
  """
  message_id = db.StringProperty()
  in_reply_to = db.StringProperty()
  references = db.TextProperty()
  incident = db.ReferenceProperty(Incident)
  title = db.StringProperty(multiline=True)
  author = db.EmailProperty()
  body = db.TextProperty()
  sent = db.DateTimeProperty()
  mailing_list = db.StringProperty()
  canonical_link = db.StringProperty()
  def AssociateMailIncident(self):
    """Associates a message with an incident, using RFC822 message IDs.
    If the message refers to an existing incident, adds it to the
    incident. If the message does not refer to another message, or
    refers to a message not in the datastore, makes a new incident.
    Sets the 'incident.updated' field to utcnow.
    Sets the 'incident.training_review' to True since the new message
    can expand the current training set for the Prediction API.
    If the message is referenced by other incidents, merges those into
    the incident.
    """
    # Case 1: the message we reply to is already stored -- join its incident.
    parent = Message.gql('WHERE message_id = :1', self.in_reply_to).get()
    if parent and parent.incident:
      logging.debug('Parent found: ' + parent.incident.title)
      self.incident = parent.incident
      self.put()
      parent.incident.PurgeJsonCache()
      parent.incident.updated = datetime.utcnow()
      parent.incident.training_review = True
      parent.incident.put()
      # Merge other incidents that point to this one, into this incident.
      children = Message.gql('WHERE in_reply_to = :1', self.message_id)
      Incident.MergeWithParent(parent.incident, children)
    else:
      # Case 2: replies to this message arrived first -- adopt the oldest
      # incident rooted at one of those replies.
      children = Message.gql('WHERE in_reply_to = :1 ORDER BY sent ASC',
                             self.message_id)
      if children.count():
        logging.debug('Found child messages: ' + str(children.count()))
        # Update new message to refer to the oldest existing incident that
        # references it.
        incident = children[0].incident
        self.incident = incident
        self.put()
        # And update the incident with earlier metadata.
        incident.created = self.sent
        incident.updated = datetime.utcnow()
        incident.training_review = True
        incident.title = self.title
        incident.author = self.author
        incident.mailing_list = self.mailing_list
        incident.canonical_link = self.canonical_link
        incident.put()
        Incident.MergeWithParent(incident, children)
      else:
        logging.debug('New incident from: ' + self.message_id)
        # Or it must be a new incident...
        incident = Incident(title=self.title,
                            author=self.author,
                            created=self.sent,
                            mailing_list=self.mailing_list,
                            canonical_link=self.canonical_link)
        incident.put()
        self.incident = incident.key()
        self.put()
    # In every case, queue the incident for tag suggestion.
    logging.info('Adding to task queue incident_key=' +
                 str(self.incident.key()))
    taskqueue.add(queue_name='predictor', url='/tasks/suggest',
                  params={'incident_key': str(self.incident.key())})
  def ReferencesList(self):
    """Provides a list of RFC822 message IDs referenced by the message.
    Returns:
      List of RFC822 message IDs, in reverse chronological order, referenced
      by the message.
    """
    return self.references.split(',')
  @staticmethod
  def FromMail(mail, message_id, store_body=False):
    """Saves a mail message to the datastore.
    Args:
      mail: Incoming message to parse and save.
      message_id: Message-ID of the incoming message.
      store_body: Whether or not to store the message body.
    Returns:
      Saved message.
    """
    message = Message(message_id=message_id)
    message.canonical_link = Message.GetCanonicalLink(message_id)
    # Convert the RFC822 Date header to a naive UTC datetime.
    parsed_tz_tuple = email.utils.parsedate_tz(mail.date)
    time_tz = email.utils.mktime_tz(parsed_tz_tuple)
    message.sent = datetime.utcfromtimestamp(time_tz)
    # NOTE(review): requires both the raw Subject header and the parsed
    # subject to be present before storing a title -- confirm intent.
    if mail.original.get('Subject') and mail.subject:
      message.title = mail.subject
    # Extract the bare address from a "Display Name <addr>" sender.
    m = re.search('.* <(.*)>', mail.sender)
    if m:
      message.author = m.group(1)
    else:
      message.author = mail.sender
    logging.debug('Received a message from: ' + message.author)
    if store_body:
      message.body = Message.GetMailBody(mail, 'text/plain')
    message.mailing_list = Message.GetMailingList(mail)
    references = mail.original.get_all('References')
    if references:
      message.references = ','.join(references)
      logging.debug(message.references)
    message.in_reply_to = Message.GetInReplyTo(mail, references)
    message.put()
    Message.RecordMailingList(message)
    Message._LogMessageIdDetails(message)
    return message
  @staticmethod
  def GetCanonicalLink(message_id):
    """Constructs the canonical link for an email.
    Args:
      message_id: Message-ID of the incoming message.
    Returns:
      Canonical link for the email (a Gmail rfc822msgid search URL).
    """
    base = 'https://mail.google.com/mail/#search/rfc822msgid%3A+'
    escaped = urllib.quote_plus(message_id)
    return base + escaped
  @staticmethod
  def GetMailBody(mail, body_type):
    """Retrieves the relevant mail body from the email.
    Args:
      mail: Incoming message to parse.
      body_type: Content type of the body to retrieve.
    Returns:
      Relevant mail body (the first body of the requested type, decoded).
    """
    return list(mail.bodies(body_type))[0][1].decode()
  @staticmethod
  def GetInReplyTo(mail, references):
    """Retrieves the functional In-Reply-To header.
    If an actual In-Reply-To header is not found, one will be constructed by
    using the last entry of the References header, if it exists.
    Args:
      mail: Incoming message to parse.
      references: Mail references, from the References header.
    Returns:
      Functional In-Reply-To value, or None when neither source exists.
    """
    in_reply_to = mail.original.get('In-Reply-To')
    if not in_reply_to and references:
      # The last whitespace-separated token of the last References entry
      # is the most recent ancestor's message ID.
      in_reply_to = references[-1].split('\n')[-1].split(' ')[-1]
      logging.debug('Using last reference instead of In-Reply-To')
      logging.debug(in_reply_to)
    if in_reply_to:
      single_line = in_reply_to.replace('\n', '')
      return single_line
    return None
  @staticmethod
  def GetMailingList(mail):
    """Retrieves the mailing list to which the message was sent.
    Will attempt to use one of two headers (Mailing-list, then List-Post)
    to find the mailing list.
    Args:
      mail: Incoming message to parse.
    Returns:
      Mailing list address, or None if neither header matches.
    """
    if mail.original.get('Mailing-list'):
      m = re.search('list (.+);', mail.original.get('Mailing-list'))
      if m:
        return m.group(1)
    elif mail.original.get('List-Post'):
      m = re.search('<mailto:(.+)>', mail.original.get('List-Post'))
      if m:
        return m.group(1)
    return None
  @staticmethod
  def RecordMailingList(message):
    """Records existence of new mailing lists not previously recorded.
    If the incoming message does not have a mailing list, this is a no-op.
    Args:
      message: Datastore representation of the incoming message.
    """
    if message.mailing_list:
      logging.debug('Mailing-list: ' + message.mailing_list)
      list_entry = List.gql('WHERE email = :1', message.mailing_list).get()
      if not list_entry:
        logging.debug('List not found, adding entry')
        list_entry = List(email=message.mailing_list)
        list_entry.put()
  @staticmethod
  def _LogMessageIdDetails(message):
    """Saves debug information for the Message-ID and related fields.
    Args:
      message: Datastore representation of the incoming message.
    """
    if message.message_id:
      logging.debug('Message-ID: ' + message.message_id)
    if message.in_reply_to:
      logging.debug('In-Reply-To: ' + message.in_reply_to)
    if message.references:
      logging.debug('References: ' + message.references)
  def GetDict(self):
    """Return a dict representation of this message.
    This will return a copy from memcache if it exists (and caching is
    enabled), and update the cache if not present.
    Returns:
      Dict representing the message.
    """
    key = self.GetJsonModelKey()
    cached = memcache.get(key)
    if cached and settings.USE_MEMCACHE_FOR_JSON_MODELS:
      return cached
    model = {
        'message_id': self.message_id,
        'in_reply_to': self.in_reply_to,
        'references': self.references,
        'title': self.title,
        'author': self.author,
        'body': self.body,
        'sent': self.sent,
        'mailing_list': self.mailing_list,
        'canonical_link': self.canonical_link}
    # The cache is refreshed even when reads are disabled, keeping it warm.
    memcache.set(key, model)
    return model
  def GetJsonModelKey(self):
    """Returns the key pointing to the instance's JSON representation.
    Returns:
      JSON model key.
    """
    return settings.MEMCACHE_VERSION_PREFIX + str(self.key().id())
class List(db.Model):
  """Describes a mailing list.
  Attributes:
    name: Name of the mailing list.
    email: Email address of the mailing list.
  """
  # Unlike Tag, the name is stored as an ordinary property, not the key name.
  name = db.StringProperty()
  email = db.EmailProperty()
class Tag(db.Model):
  """Describes a tag.
  A tag includes a model and a category. The model may be explicitly
  stated or, if it is missing, all tags with no explicit model are
  implicitly part of the same unspecified model.
  Attributes:
    name: Name of the tag and key of model object. Format:
        ["model""_MODEL_MARKER"]"category". You can only set 'name'
        when you create the Tag because it is the key.
    example_count: Total count of current examples (with Accepted tags).
    trained_count: Count of examples at last training. At the moment
        of training, trained_count = example_count.
    trained_date: When this tag's examples were last sent to the Prediction
        API.
  """
  example_count = db.IntegerProperty(default=0)
  trained_count = db.IntegerProperty(default=0)
  trained_date = db.DateTimeProperty()
  # _DEFAULT_MODEL is used when the user does not specify a model.
  # This app uses this string to name a training set for the Prediction
  # API, creating a file on Google Storage with this prefix. This
  # string also appears on the User Settings page to describe the
  # model created when the user does not specify a model. You might
  # want to change this string to localize it for presentation.
  _DEFAULT_MODEL = 'unspecified_model'
  # _DEFAULT_CATEGORY should never be seen or assigned since the UI
  # should always guarantee a non-blank Tag name. Provided as a safe
  # fallback. There is no need to change it.
  _DEFAULT_CATEGORY = 'unspecified_category'
  # _MODEL_MARKER defines the character which splits the model from
  # the category. If you change this then you must also change the
  # Javascript which enforces the tag definition in ui.js:
  # google.devrel.samples.autodo.Bindings.bindTagTextInput
  _MODEL_MARKER = '-'
  @property
  def name(self):
    """Get the Key name."""
    return self.key().name()
  @classmethod
  def ModelCategory(cls, tag):
    """Split a tag into a model and category.
    The goal is to isolate all the knowledge about how to parse a tag
    and model within Tag so that other functions don't have to change
    if we modify the format.
    Args:
      tag: String, the tag as typed by the user or Tag.name.
    Returns:
      Dictionary of [model, category, explicit]
      model: Group of competing tags.
      category: A classification within a model.
      explicit: True if model was specified,
          False if we applied default model name.
    """
    logging.info('TAG=%s', tag)
    # NOTE(review): split() without a maxsplit silently drops any third
    # segment of a tag like 'a-b-c' (model='a', category='b', 'c' lost).
    # The UI javascript appears to enforce single-marker tags -- confirm.
    split = dict(zip(('model', 'category'),
                     tag.split(cls._MODEL_MARKER)))
    if 'category' in split:
      split['explicit'] = True
    else:
      # No marker present: the lone segment is the category.
      split['explicit'] = False
      split['category'] = split['model']
      split['model'] = cls._DEFAULT_MODEL
    # Fall back to safe defaults for empty segments (e.g. '-foo' or 'foo-').
    if not split['model']:
      split['model'] = cls._DEFAULT_MODEL
    if not split['category']:
      split['category'] = cls._DEFAULT_CATEGORY
    return split
  @classmethod
  def ModelMatches(cls, model, tag):
    """Determine if a tag is a category of a model."""
    # A marker-less tag belongs to the implicit default model.
    if cls._MODEL_MARKER not in tag and (
        not model or model == cls._DEFAULT_MODEL):
      return True
    else:
      return tag.startswith(model + cls._MODEL_MARKER)
  @classmethod
  def CreateMissingTags(cls, incident):
    """Create Tag Instances for tags in the given incident.
    Tags could have come from the Prediction API or the user.
    Args:
      incident: Incident to pull tags from for creation.
    """
    tags = set(incident.suggested_tags)
    tags.update(incident.accepted_tags)
    for tag in tags:
      # Use negative example_count to signal a new tag.
      tag_instance = cls.get_or_insert(tag, example_count=(-1))
      if tag_instance.example_count < 0:
        tag_instance.example_count = 0
        tag_instance.put()
      SuggestionModel.CreateMissingModel(tag)
class Credentials(db.Model):
  """Credentials Datastore class to store user's credentials information.
  Attributes:
    credentials: User's OAuth 2.0 credentials.
    email: User's email.
    user_id: User's ID (also used as key).
  """
  credentials = CredentialsProperty()
  email = db.StringProperty()
  @property
  def user_id(self):
    # The datastore key name doubles as the user id.
    return self.key().name()
class UserSettings(db.Model):
  """Store user's settings.
  Attributes:
    tasks_credentials: Tasks API scoped credentials.
    email: User's email (also used as key).
    add_to_tasks: Whether or not to automatically add assigned incidents to
        the user's task list.
    task_list_id: ID of the task list to add the incidents to.
  """
  tasks_credentials = CredentialsProperty()
  add_to_tasks = db.BooleanProperty(default=False)
  task_list_id = db.StringProperty(default='@default')
  @property
  def email(self):
    # The datastore key name doubles as the user's email address.
    return self.key().name()
class IncidentTask(db.Model):
  """Store link between an incident and a user's Task.
  Attributes:
    incident_id: ID of the incident (also used as key).
    task_id: ID of the user's task.
    task_list_id: ID of the user's task list.
    owner: Owner of this IncidentTask.
  """
  task_id = db.StringProperty()
  task_list_id = db.StringProperty(default='@default')
  owner = db.StringProperty()
  @property
  def incident_id(self):
    # The datastore key name doubles as the incident id.
    return self.key().name()
class SuggestionModel(db.Model):
  """Track data related to a model that was sent to the Prediction API.

  Attributes:
    name: The name of the model. Read-only. Set at creation time.
    training_file: Name of the Google Storage object for this model.
        Empty if never sent to Google Storage.
    training_date: Time and date when training was confirmed complete.
    training_tags: Tags included in the original training set.
    training_examples: Count of examples in the last training set.
    export_file: Name of downloadble file containing training set.
        Empty if never exported.
    export_date: Time and date when data last exported.
    export_tags: Tags included with exported data set.
    export_examples: Count of examples in the last exported data set.
    ui_tags: Tags to be shown in the UI as examples of this model.
        These tags could include new tags not yet added to a training or
        export.
  """
  training_file = db.StringProperty()
  training_date = db.DateTimeProperty()
  training_tags = db.ListProperty(str)
  training_examples = db.IntegerProperty(default=0)
  export_file = db.StringProperty()
  export_date = db.DateTimeProperty()
  export_tags = db.ListProperty(str)
  export_examples = db.IntegerProperty(default=0)
  ui_tags = db.ListProperty(str)

  @property
  def name(self):
    """Get the Key name."""
    return self.key().name()

  @classmethod
  def CreateMissingModel(cls, tag):
    """Create a new model for a tag, if necessary, and add tag to ui list.

    Args:
      tag: String name of a specific Tag.
    """
    model_name = Tag.ModelCategory(tag)['model']
    suggestion_model = cls.get_or_insert(model_name)
    suggestion_model.AddUITags([tag])
    suggestion_model.put()

  def AddUITags(self, tags):
    """Add one or more tags to this model for display in UI.

    This is strictly a convenience function for the UI and does not create a
    canonical list. The canonical lists are in training_tags and export_tags
    which contain all the tags present at the generation of those training
    sets.

    If the model name in the tag does not match this current model
    key_name then no change to the entity. If the model name matches
    then the tag will be added to the ui_tags set.

    Args:
      tags: list of Strings
    """
    matching = [tag for tag in tags if Tag.ModelMatches(self.name, tag)]
    # BUG FIX: accumulate into the existing ui_tags instead of replacing
    # them. The original assignment discarded previously recorded UI tags
    # on every call, leaving only the most recent batch visible.
    self.ui_tags = list(set(self.ui_tags) | set(matching))
| Python |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
import sys,os
import re
import ConfigParser
import memcache
import urlparse
import smtplib
from email.mime.text import MIMEText
import urlparse
from BaseHTTPServer import HTTPServer,BaseHTTPRequestHandler
class RequestHandler(BaseHTTPRequestHandler):
def sendmail(self, email):
sender = 'admin@xxx.com'
mailto = email
#邮件信息
msg =MIMEText("new password is 365im")
msg['Subject'] = 'reg ok'
msg['to'] = mailto
msg['From'] = sender
#连接发送服务器
smtp = smtplib.SMTP('mail.xxx.com')
smtp.login('admin@xxx.com', 'password')
#发送
smtp.sendmail(sender,mailto,msg.as_string())
smtp.quit()
print 'Send OK'
def checkAccountValid(self, email): #返回0表示此邮箱可用
if len(email) < 6: #x@a.cn,最少6个符
return -1
if re.match("^.+\\@(\\[?)[a-zA-Z0-9\\-\\.]+\\.([a-zA-Z]{2,3}|[0-9]{1,3})(\\]?)$", email) == None:
return -2
value = mc.get(email)
if (value == None):
return 0
return 1
def _writeheaders(self):
self.send_response(200)
self.send_header('Content-type','text/html')
self.end_headers()
def do_HEAD(self):
self._writeheaders()
def do_GET(self):
self._writeheaders()
#4011,4012,4013,4014 total 4 xmpp client, idle or busy can get from memcached
#self.wfile.write(self.path[0:14])
ret='0'
if self.path[0:12] == '/reg_new_user': #http://xxx/reg_new_user/user,pw,time,code
url=self.path[12:];
#ret=str2;
strlist=url.split(',')
if len(strlist)!=4:
ret='0';
else:
mail=strlist[0]
#pw=strlist[1]
time=strlist[2]
code=strlist[3]
if (self.checkAccountValid(mail) == 0):
value="365im"+"|"+time+code
mc.set(mail, value)
else:
ret='0'
self.wfile.write(ret)
host = ''
port = 5300
memcache_host='ip:port'
try:
config = ConfigParser.ConfigParser()
config.read('config.ini')
host=config.get('serverinfo', 'host')
port=config.get('serverinfo', 'port')
memcache_host = config.get('serverinfo', 'memcache')
print 'Init OK'
serveraddr = (host, int(port))
mc = memcache.Client([memcache_host],debug=0)
srvr = HTTPServer(serveraddr,RequestHandler)
srvr.serve_forever()
except:
print 'Open failled'
exit()
| Python |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
import sys,os
import re
import ConfigParser
import memcache
import urlparse
import smtplib
from email.mime.text import MIMEText
import urlparse
from BaseHTTPServer import HTTPServer,BaseHTTPRequestHandler
class RequestHandler(BaseHTTPRequestHandler):
def sendmail(self, email):
sender = 'admin@xxx.com'
mailto = email
#邮件信息
msg =MIMEText("new password is 365im")
msg['Subject'] = 'reg ok'
msg['to'] = mailto
msg['From'] = sender
#连接发送服务器
smtp = smtplib.SMTP('mail.xxx.com')
smtp.login('admin@xxx.com', 'password')
#发送
smtp.sendmail(sender,mailto,msg.as_string())
smtp.quit()
print 'Send OK'
def checkAccountValid(self, email): #返回0表示此邮箱可用
if len(email) < 6: #x@a.cn,最少6个符
return -1
if re.match("^.+\\@(\\[?)[a-zA-Z0-9\\-\\.]+\\.([a-zA-Z]{2,3}|[0-9]{1,3})(\\]?)$", email) == None:
return -2
value = mc.get(email)
if (value == None):
return 0
return 1
def _writeheaders(self):
self.send_response(200)
self.send_header('Content-type','text/html')
self.end_headers()
def do_HEAD(self):
self._writeheaders()
def do_GET(self):
self._writeheaders()
#4011,4012,4013,4014 total 4 xmpp client, idle or busy can get from memcached
#self.wfile.write(self.path[0:14])
ret='0'
if self.path[0:12] == '/reg_new_user': #http://xxx/reg_new_user/user,pw,time,code
url=self.path[12:];
#ret=str2;
strlist=url.split(',')
if len(strlist)!=4:
ret='0';
else:
mail=strlist[0]
#pw=strlist[1]
time=strlist[2]
code=strlist[3]
if (self.checkAccountValid(mail) == 0):
value="365im"+"|"+time+code
mc.set(mail, value)
else:
ret='0'
self.wfile.write(ret)
host = ''
port = 5300
memcache_host='ip:port'
try:
config = ConfigParser.ConfigParser()
config.read('config.ini')
host=config.get('serverinfo', 'host')
port=config.get('serverinfo', 'port')
memcache_host = config.get('serverinfo', 'memcache')
print 'Init OK'
serveraddr = (host, int(port))
mc = memcache.Client([memcache_host],debug=0)
srvr = HTTPServer(serveraddr,RequestHandler)
srvr.serve_forever()
except:
print 'Open failled'
exit()
| Python |
import sys, string, re, Queue
# Binary IR opcodes and, index-aligned, the C operators they translate to.
arith = ['sub', 'div', 'mod', 'cmple', 'add', 'mul', 'cmpeq', 'cmplt']
operators = ['-', '/', '%', '<=', '+', '*', '==', '<']
# Unary IR opcodes (negation only).
arith1 = ['neg']
# Byte size of the current function's local frame; set when 'enter' is seen.
local_size = 0;
# get operand
# get operand
def getOperand(t, sline, access_local):
    """Print the C expression for the operand at index `t` of the split IR
    line `sline` and return the index just past it.

    A negative return -(i) signals a two-token operand with a negative
    offset: the next operand (starting at index i) lives in the local
    frame, so the caller should pass access_local=1 for it.
    """
    #GP: global pointer -> the module-level global data array
    if sline[t] == 'GP':
        print '(char*)global',
        return t+1
    #FP: frame pointer -> locals when access_local, else the parameter block
    elif sline[t] == 'FP':
        if access_local:
            print '(char*)&local[' + str(local_size/8 -1) + ']',
        else:
            print '(char*)param',
        return t+1
    #constant
    elif sline[t].isdigit():
        print sline[t],
        return t+1
    #address offsets and field offsets (two tokens: name, byte offset)
    elif sline[t].endswith('_base') or sline[t].endswith('_offset'):
        if sline[t+1][0] == '-':
            # negative offset: +8 skips the saved slot; flag the caller
            # via the negative return value
            print '(' + str(int(sline[t+1])+8) + ')',
            return -(t+2)
        else:
            print str(int(sline[t+1])-8),
            return t+2
    #register name, e.g. (5) -> r5
    elif sline[t][0] == '(':
        print 'r' + sline[t].strip('()'),
        return t+1
    #code label, e.g. [7] -> instr_7
    elif sline[t][0] == '[':
        print 'instr_' + sline[t].strip('[]'),
        return t+1
    #local variables / parameters (two tokens: name, byte offset; 8 bytes per slot)
    else:
        if sline[t+1][0] == '-':
            print 'local[' + str((local_size-int(sline[t+1].strip('-')))/8) + ']',
        else:
            print 'param[' + str(int(sline[t+1])/8-1) + ']',
        return t+2
# get next operand start
# get next operand start
def getStart(t, sline):
    """Return the index just past the operand at index `t` of `sline`
    without printing anything (the lookahead companion of getOperand).

    Mirrors getOperand's return values, including the negative -(t+2)
    marker for two-token operands with a negative offset.
    """
    #GP
    if sline[t] == 'GP':
        return t+1
    #FP
    elif sline[t] == 'FP':
        return t+1
    #constant
    elif sline[t].isdigit():
        return t+1
    #address offsets and field offsets
    # BUG FIX: matched 'base'/'_offsets' while getOperand matches
    # '_base'/'_offset'; '*_offset' operands therefore fell through to the
    # local-variable branch and lost the negative-offset marker.
    elif sline[t].endswith('_base') or sline[t].endswith('_offset'):
        if sline[t+1][0] == '-':
            return -(t+2)
        else:
            return t+2
    #register name
    elif sline[t][0] == '(':
        return t+1
    #code label
    elif sline[t][0] == '[':
        return t+1
    #local variables
    else:
        return t+2
#----------------- Main -----------------#
# Read a three-address IR from stdin and emit equivalent C source on stdout.
#if len(sys.argv) != 2:
#    print "please specify input file name"
#    sys.exit(0)
#
#ifile = open(sys.argv[1], 'r')
#parameters
# LIFO stack: 'param' pushes argument registers, 'call' pops them in reverse.
params = Queue.LifoQueue()
params_n = 0
parsing_main = 0
# Print out header of the file
print '#include <stdio.h>\n\
#include <stdlib.h>\n\
#include <string.h>\n\
#define WriteLine() printf("\\n");\n\
#define WriteLong(x) printf(" %lld", x);\n\
#define ReadLong(a) if (fscanf(stdin, "%lld", &a) != 1) a = 0;\n\
#define long long long\n\n'
print 'long global[4096];\n'
# parse the file line by line
#for line in ifile:
for line in sys.stdin:
    # token layout: instr <num>: <opcode> <operands...>; '#' starts a comment
    sline = re.split(': | |#', line.rstrip('\n').lstrip(' '))
    if sline[2] == 'nop':
        continue
    #print label for next instruction
    if sline[2] != 'enter' and sline[2] != 'entrypc':
        print 'instr_' + sline[1] + ':;\n\t',
    #function start
    if sline[2] == 'enter':
        # sline[3] is the local frame size in bytes (8 bytes per slot)
        assert int(sline[3]) % 8 == 0, 'operand not divisible by 8';
        if not parsing_main:
            print 'void func_' + sline[1] + '(long* param) {\n',
        else:
            print 'void main() {\n',
        if (sline[3] != '0'):
            print 'long local[' + str(int(sline[3])/8) + '];\n',
            local_size = int(sline[3]);
        parsing_main = 0
    #main start
    if sline[2] == 'entrypc':
        parsing_main = 1
    #function return
    elif sline[2] == 'ret':
        print 'return;\n}\n',
    #arithmatic
#    elif sline[2] in arith:
#        print 'long r' + sline[1] + ' =',
##        t = getOperand(3, sline, 0)
##        print operators[arith.index(sline[2])],
##        if (t < 0):
##            getOperand(-t, sline, 1)
##        else:
##            getOperand(t, sline, 0)
#        t = getStart(3, sline)
#        if (t < 0):
#            getOperand(-t, sline, 1)
#        else:
#            getOperand(t, sline, 0)
#        print operators[arith.index(sline[2])],
#        getOperand(3, sline, 0)
#        print ';\n',
    elif sline[2] in arith:
        # binary op: long r<n> = <op1> <operator> <op2>;
        print 'long r' + sline[1] + ' =',
        t = getOperand(3, sline, 0)
        print operators[arith.index(sline[2])],
        if (t < 0):
            getOperand(-t, sline, 1)
        else:
            getOperand(t, sline, 0)
        print ';\n',
    elif sline[2] in arith1:
        # unary negation: long r<n> = <op> * (-1);
        print 'long r' + sline[1] + ' =',
        t = getOperand(3, sline, 0)
        print ' * (-1);\n',
    #branch
    elif sline[2] == 'br':
        print 'goto ',
        getOperand(3, sline, 0)
        print ';\n',
    elif sline[2] == 'blbs':
        # branch if low bit set (operand != 0)
        print 'if (',
        t = getOperand(3, sline, 0)
        print '!= 0) goto',
        getOperand(t, sline, 0)
        print ';\n',
    elif sline[2] == 'blbc':
        # branch if low bit clear (operand == 0)
        print 'if (',
        t = getOperand(3, sline, 0)
        print '== 0) goto',
        getOperand(t, sline, 0)
        print ';\n',
    #data movement
    elif sline[2] == 'load':
        print 'long r' + sline[1] + ' = *(long*)',
        getOperand(3, sline, 0)
        print ';\n',
    elif sline[2] == 'move':
        # move <src> <dst>: emit destination first, then ' = <src>'
        t = getStart(3, sline);
        getOperand(t, sline, 0)
        print ' = ',
        getOperand(3, sline, 0)
        print ';\n',
    elif sline[2] == 'store':
        # store <value> <addr>: *(long*)<addr> = <value>;
        print '*(long*)',
        t = getStart(3, sline)
        getOperand(t, sline, 0)
        print ' =',
        getOperand(3, sline, 0)
        print ';\n',
    #I/O
    elif sline[2] == 'write':
        print 'WriteLong(',
        getOperand(3, sline, 0)
        print ');\n',
    elif sline[2] == 'wrl':
        print 'WriteLine();\n',
    elif sline[2] == 'read':
        #TODO: read didn't appear in all any tests.. need to be tested
        print 'long r' + sline[1] + ';\n\t',
        print 'ReadLong( r' + sline[1],
        print ');\n',
    #Parameter and call
    elif sline[2] == 'param':
        print 'long r' + sline[1] + ' = ',
        getOperand(3, sline, 0)
        print ';//input parameter\n',
        params.put(sline[1])
        params_n += 1
    elif sline[2] == 'call':
        # copy the pushed parameter registers into a fresh malloc'd block;
        # slot 0 is unused, parameters start at index 1
        param_name = 'param_' + sline[1]
        print 'long* ' + param_name + ' = (long*)malloc(sizeof(long)*' + str(params_n+1) + ');\n',
        params_n = 0;
        while not params.empty():
            tt = params.get();
            print 'memcpy(' + param_name + '+' + str(params_n+1) + ', &r' + tt + ', sizeof(long));\n',
            params_n += 1
        params_n = 0
        print 'func_' + sline[3].strip('[]') + '(' + param_name + ');\n',
        print 'free (' + str(param_name) + ');\n',
sys.exit(0)
| Python |
import logging
from web.webbase import *
from classes import classes
from imports.n3tagvalue import svalue
def gettag(t):
    """Strip a leading non-alphabetic marker (e.g. the '+' accumulate
    prefix) from a tag name."""
    if t[0].isalpha():
        return t
    return t[1:]
def getprop(t):
    """Map a tag to its namespaced SPARQL property: all-uppercase tags
    live in the 'ofx' namespace, everything else in 'a3'."""
    tag = gettag(t)
    prefix = 'ofx' if tag.isupper() else 'a3'
    return '%s:%s' % (prefix, tag)
def getclass(items,typ,acctids,dtstart,dtend,uniqueids,fltrextension=''):
    """Collect objects in items
    typ - the class to be collected as defined in classes module.
    acctids - limit results to lines from ACCTID which is one of the values in the list
    dtstart, dtend - in the class definition look for all the tags that make up the key for an object.
    if one of those tags begin with DT then it is taken to be the time-key (e.g. DTASOF).
    take only lines that the time-key falls in the range.
    fltrextension - manually define additional SPARQL filter lines.

    NOTE(review): the SPARQL query is assembled by plain string
    concatenation, so acctids/uniqueids values are interpolated unescaped -
    safe only for trusted input.
    """
    c=classes[typ]
    # ms: tags that must be bound. c['key']+c['mandatory'] builds a fresh
    # list, so the appends below do not mutate the shared classes table.
    ms=c['key']+c['mandatory']
    fltr=""
    if acctids and 'ACCTID' not in ms: ms.append('ACCTID')
    # with 10+ ids the filter is skipped; the post-filtering loop at the
    # bottom picks up the slack
    if acctids and len(acctids)<10:
        fltr+='filter ('
        fltr+='( ?ACCTID = "'+'" ) || ( ?ACCTID = "'.join(acctids)+'" )'
        fltr+=')\n'
    if dtstart or dtend:
        # the first key tag starting with 'DT' is the object's time key
        dtkey=''
        for k in c['key']:
            if k.upper()[0:2]=="DT":
                dtkey=k
                break
        if dtstart and dtkey:
            d,t=svalue(dtkey,list(dtstart)[0])
            fltr+='filter ( ?%s >= "%s"%s )\n'%(dtkey,d,t)
        if dtend and dtkey:
            dtend=list(dtend)[0]
            # make sure dtend refers to the last moment of the day
            if len(dtend)==8 or len(dtend)==6:
                dtend+='235959'
            d,t=svalue(dtkey,dtend)
            fltr+='filter ( ?%s <= "%s"%s )\n'%(dtkey,d,t)
    if uniqueids and 'UNIQUEID' not in ms: ms.append('UNIQUEID')
    if uniqueids and len(uniqueids)<10:
        fltr+='filter ('
        fltr+='( ?UNIQUEID = "'+'" ) || ( ?UNIQUEID = "'.join(uniqueids)+'" )'
        fltr+=')\n'
    fltr+=fltrextension
    os=c['optional']
    vs=ms+os
    #q="""PREFIX ofx:<http://www.w3.org/2000/10/swap/pim/ofx#>
    #PREFIX a3:<http://code.google.com/p/3account/wiki/Schema#>
    #PREFIX xsd:<http://www.w3.org/2001/XMLSchema#>
    # select one variable per tag; mandatory tags are plain triple patterns,
    # optional ones are wrapped in OPTIONAL blocks
    q="""select distinct"""
    for v in vs:
        q+=" ?"+gettag(v)
    q+=" {\n"
    for v in ms:
        q+=" ?x "+getprop(v)+" ?"+gettag(v)+".\n"
    for v in os:
        q+="optional { ?x "+getprop(v)+" ?"+gettag(v)+"}.\n"
    q+=fltr
    q+="}"
    logging.info("SPARQL query:\n%s"%q)
    rows=con.query(q)
    # do post filtering on acctids and uniqueids in case they have more than
    # 10 elements
    # there is no such problem with dtstart/end
    for row in rows:
        if (acctids and
            (row['ACCTID']['value']
             if isinstance(row['ACCTID'],dict)
             else row['ACCTID'][0])
            not in acctids): continue
        if (uniqueids and
            (row['UNIQUEID']['value']
             if isinstance(row['UNIQUEID'],dict)
             else row['UNIQUEID'][0])
            not in uniqueids): continue
        row2dict(items,row,c['key'],vs,typ=typ)
def getassets(items,uniqueids):
    """Add 'asset' objects for `uniqueids` to items, then normalise every
    asset already in items: derive 'assetClass' from FIASSETCLASS or
    ASSETCLASS, and append the subtype (e.g. STOCKTYPE) to SECTYPE."""
    if uniqueids:
        getclass(items,'asset',None,None,None,uniqueids)
    for o in items.itervalues():
        if o['type']!='asset': continue
        # prefer the FI-specific class when no generic class is present
        if 'assetClass' not in o:
            if 'FIASSETCLASS' in o:
                o['assetClass']=o['FIASSETCLASS']
            elif 'ASSETCLASS' in o:
                o['assetClass']=o['ASSETCLASS']
        if 'SECTYPE' in o:
            # e.g. SECTYPE 'STOCK' + STOCKTYPE 'COMMON' -> 'STOCK COMMON'
            nm = o['SECTYPE'][0]+'TYPE'
            if nm in o:
                o['SECTYPE'][0] =o['SECTYPE'][0] + ' ' + o[nm][0]
def getassetspercents(items,uniqueids):
    """Find all Asset objects with uniqueids and to each add a 'percent' dictionary which gives
    the percentage of each ASSETCLASS for that asset"""
    getassets(items,uniqueids)
    # assetpercent objects are keyed by (UNIQUEID, ASSETCLASS); fold them
    # into the matching asset's 'percent' map
    assetpercents={}
    getclass(assetpercents,'assetpercent',None,None,None,uniqueids)
    for o in assetpercents.itervalues():
        uniqueid=o['UNIQUEID'][0]
        if 'percent' not in items[uniqueid]:
            items[uniqueid]['percent']={}
        items[uniqueid]['percent'][o['ASSETCLASS'][0]]=o['PERCENT'][0]
def getpositions(items,acctids,dtstart,dtend,uniqueids=None):
    """Collect 'position' objects, keeping for each account only the
    positions of the latest DTASOF that falls inside [dtstart, dtend].

    NOTE(review): assumes dtend is a non-empty iterable of date strings -
    confirm against callers.
    """
    # Make sure we dont get entries from the same account (ACCTID) from a latter date
    # then the maximal date that falls in the range
    dtend1=list(dtend)[0]
    # make sure dtend refers to the last moment of the day
    if len(dtend1)==8 or len(dtend1)==6:
        dtend1+='235959'
    d,t=svalue('DTASOF',dtend1)
    # extra SPARQL: reject a position if a later statement (?dt2) exists for
    # the same account within the range (the !bound trick keeps only the
    # rows with no such later statement)
    ext=('optional {'+
         '?y2 ofx:DTASOF ?dt2 . '+
         '?y2 ofx:ACCTID ?ACCTID . '+
         'filter( ?dt2 > ?DTASOF ).'+
         'filter( ?dt2 <= "%s"%s )'%(d,t)+
         '}. '+
         'filter(!bound(?dt2)).\n')
    getclass(items,'position',acctids,dtstart,dtend,uniqueids,fltrextension=ext)
def gettransactions(items,acctids,dtstart,dtend,uniqueids=None):
    """Collect investment transactions plus checking entries into items."""
    # investment transactions honour the optional uniqueids filter
    getclass(items,'transaction',acctids,dtstart,dtend,uniqueids)
    # checking entries carry no UNIQUEID, so that filter is not applied
    getclass(items,'checking',acctids,dtstart,dtend,None)
def getchecking(items,acctids,dtstart,dtend,uniqueids=None):
    """Collect checking-account entries into items (uniqueids is accepted
    for signature symmetry but ignored)."""
    getclass(items,'checking',acctids,dtstart,dtend,None)
| Python |
import os
from rdflib import Namespace, BNode, Literal, RDF, URIRef
from semprog.pysesame import connection
import cherrypy
from mako.lookup import TemplateLookup
import urllib
from simplejson import dumps
from classes import classes
# Prefer a per-user CherryPy config, then a local one, else built-in defaults.
user_cfg = os.path.expanduser("~/cherrypy.cfg")
if os.path.exists(user_cfg):
    cherrypy_cfg = user_cfg
elif os.path.exists("cherrypy.cfg"):
    cherrypy_cfg = "cherrypy.cfg"
else:
    cherrypy_cfg = None
if cherrypy_cfg:
    cherrypy.config.update(cherrypy_cfg)
else:
    cherrypy.config.update({'server.socket_port':8000})
# Sesame RDF store connection, shared by the whole web layer.
url=cherrypy.config.setdefault('sesame.url','http://localhost:8080')
url+='/openrdf-sesame/'
# Mako template loader used by all page handlers.
lookup = TemplateLookup(directories=['templates'],
        output_encoding='utf-8',encoding_errors='replace',
        input_encoding='utf-8')
con=connection(url)
repository=cherrypy.config.setdefault('sesame.repository','3account')
con.use_repository(repository)
# RDF namespaces used both in queries and when loading data.
OFX = Namespace("http://www.w3.org/2000/10/swap/pim/ofx#")
OFXH = Namespace("http://www.w3.org/2000/10/swap/pim/ofx-headers#")
XSD = Namespace("http://www.w3.org/2001/XMLSchema#")
A3 = Namespace("http://code.google.com/p/3account/wiki/Schema#")
con.addnamespace('ofx',OFX)
con.addnamespace('ofxh',OFXH)
con.addnamespace('xsd',XSD)
con.addnamespace('a3',A3)
def lst(l):
    """Wrap `l` in a single-element list unless it already is a list."""
    return l if isinstance(l, list) else [l]
def row2class(items,row,typ):
    """Feed `row` to row2dict using the key and tag lists that the classes
    table declares for class `typ`."""
    spec = classes[typ]
    all_tags = spec['key'] + spec['mandatory'] + spec['optional']
    row2dict(items, row, spec['key'], all_tags, typ)
def row2dict(objDict,row,idtag,tags,typ=None):
    """ Build objects out of flat-lines!
    row is a dict with tag:value pairs.
    The value in row is either a list of literals (str,float) or a dict of the form {'value':value,'datatype':'xsd:...'}.
    The function searches for an object in row and if found adds it to objDict.
    The key to the object in objDict is a str of a single value or str of a list of multiple values.
    In this way an object with a simple key is easy to access.
    The value(s) for the key are taken from the values assigned in row to the tag(s) defined by idtag.
    If not all idtag(s) are found in row then the object is not "in" the row.
    An object is dict stored as a value in objDict.
    The object may have attributes which are tag:[value,...] pairs were value (str,float) are taken from the row
    and the tag are taken from tags list.
    objDict={...
    str(row[idtag[0]]):{'label':key, 'type':typ, tags[0]:[..., row[tags[0]], ...}
    or
    str([row[idtag[0]], row[idtag[1]],...]):{'label':key, 'type':typ, tags[0]:[..., row[tags[0]], ...}
    ...}
    Only unique values are stored, and if the tag in tags is prefixed with '+' the value (float) is added
    """
    idtag = lst(idtag)
    # the row holds an object of this class only if every key tag is bound
    if any(t not in row for t in idtag): return
    # normalise: SPARQL binding dicts {'value':v,...} become one-element lists
    nrow =dict((t,[v['value']] if isinstance(v,dict) else v) for t,v in row.iteritems())
    key=str(nrow[idtag[0]][0] if len(idtag)==1
            else [nrow[t][0] for t in idtag])
    if key not in objDict: objDict[key] = {'label':key}
    if typ: objDict[key]['type']=typ
    for tag in tags:
        add=False
        # '+tag' accumulates a float sum instead of collecting unique values
        if tag.startswith('+'):
            tag=tag[1:]
            add=True
        if tag not in row: continue
        for v in nrow[tag]:
            # xsd:decimal cells are converted to float before storing
            if (isinstance(row[tag],dict) and
                row[tag].get('datatype','') == str(XSD.term('decimal'))):
                v = float(v)
            if tag not in objDict[key]:
                objDict[key][tag]=[0.] if add else []
            if add:
                objDict[key][tag][0]+=float(v)
            elif v not in objDict[key][tag]:
                objDict[key][tag].append(v)
from htmlentitydefs import codepoint2name
def htmlentities(u):
    """Return `u` with every character that has a named HTML entity
    replaced by '&name;'; all other characters (including spaces) pass
    through unchanged."""
    out = []
    for ch in u:
        cp = ord(ch)
        if cp in codepoint2name:
            out.append('&%s;' % codepoint2name[cp])
        elif ch == ' ':
            # kept from the original: spaces are emitted as-is
            out.append(' ')
        else:
            out.append(ch)
    return ''.join(out)
def printCleanup(trans):
    """Normalise transaction dicts in place for HTML display: trim URI
    values down to the fragment after '#', render floats with two
    decimals, and HTML-escape every value."""
    if not isinstance(trans, list):
        trans = [trans]
    for tran in trans:
        for tag in tran.iterkeys():
            trimmed = [v[v.rfind("#") + 1:] if isinstance(v, basestring) else v
                       for v in tran[tag]]
            tran[tag] = [htmlentities("%.2f" % v if isinstance(v, float) else v)
                         for v in trimmed]
| Python |
from web.webbase import *
from get import getclass,getpositions,getassets
def accounts():
    """Return every 'account' object known to the store, as a list."""
    collected = {}
    getclass(collected, 'account', None, None, None, None)
    return collected.values()
def accounttotals(acctids,dtstart,dtend):
    """Compute per-account and per-group statement totals for the range.
    Returns a mixed list of account, accountdtasof and groupdayasof objects."""
    # collect information on positions
    positions={}
    getpositions(positions,acctids,dtstart,dtend)
    # add information on assets
    assets={}
    uniqueids=set(o['UNIQUEID'][0] for o in positions.itervalues()) # get all uniqueids in use
    getassets(assets,uniqueids)
    # collect information on accounts
    accounts={}
    # get general information on accounts which is NOT time dependent
    getclass(accounts,'account',acctids,None,None,None)
    # collect information on statements
    getclass(accounts,'accountdtasof',acctids,dtstart,dtend,None)
    # for each account and DTASOF, sum up MKTVAL of all positions.
    # ('+mktval' in the accountdtasof class makes row2class accumulate sums)
    for o in positions.itervalues():
        if o['type']!='position': continue
        if 'MKTVAL' not in o: continue
        row={'ACCTID':o['ACCTID'],
             'DTASOF':o['DTASOF'],
             'mktval':o['MKTVAL']}
        # count money-market positions that are marked as CASH
        if 'CASH' in assets.get(o['UNIQUEID'][0],{}).get('assetClass',[]):
            row['moneymrkt']=o['MKTVAL']
        row2class(accounts,row,"accountdtasof")
    # Add cash to total value
    for o in accounts.itervalues():
        if o['type']!='accountdtasof': continue
        if 'AVAILCASH' not in o:
            o['AVAILCASH']=o.get('moneymrkt',[0.])
        # swap the money-market component for the reported available cash
        if 'mktval' in o:
            o['mktval'][0]+=o['AVAILCASH'][0]-o.get('moneymrkt',[0.])[0]
        else:
            o['mktval']=o['AVAILCASH']
        o['mktval'][0]+=o.get('BALAMT',[0.])[0]
    # roll account totals up into their account groups, one entry per day
    accountdtasofs = [o for o in accounts.itervalues()
                      if o['type']=='accountdtasof']
    for o in accountdtasofs:
        ACCTID=o['ACCTID'][0]
        accountGroups=accounts[ACCTID].get('accountGroup',[])
        for accountGroup in accountGroups:
            row2class(accounts,{'accountGroup':[accountGroup],
                                'dayasof':[o['DTASOF'][0][:10]],
                                'mKTVAL':o['mktval']},'groupdayasof')
    return accounts.values()
| Python |
from get import *
def asset(uniqueids):
    """Return positions, transactions and asset details for `uniqueids`."""
    collected = {}
    getpositions(collected, None, None, None, uniqueids)
    gettransactions(collected, None, None, None, uniqueids)
    getassets(collected, uniqueids)
    return collected.values()
"""A dictonary of all the classes of objects that can be found in flat lines.
the keys are the class types (names). The values describe each class:
* The key list describes all the tags that must appear in a flat line
in order for an object of the class to be said to be in the line.
The object is uniquely identified by the values these tags receive
in the flat line. The keys are also properties of the object.
* In a key each tag receives one value (as appose to other properties that can
have multiple values.) If multiple key values appear in a single flat line
the different objects of the same class that are identified by the different
key value should be smushed togther (see pending patent US20080215619)
* The mandatory and optional lists specify additional properties of the object.
The values assigned to the properties are accumulated from all the lines
in which an object appears. Its possible for multiple values to be defined
in the same flat line. By default, only uniqiue values are taken, but if
the + sign is prefixed to a property tag, all the accumulated values are
summed togther.
* In addition properties in the mandatory list must appear in every flat line
in which the object is said to be in. However, these properties are not used
as part of the object's key. The mandatory list acts as a filter on the lines
in which an object can appear.
"""
classes={
'account':{
'key':['ACCTID'],
'mandatory':[],
'optional':['accountGroup','CURDEF','aCCTTYPE','ACCTTYPE']},
'asset':{
'key':['UNIQUEID'],
'mandatory':[],
'optional':['SECNAME','TICKER','assetClass',
'FIASSETCLASS','ASSETCLASS','SECTYPE',
'STOCKTYPE', 'OPTTYPE', 'DEBTTYPE', 'MFTYPE', 'DTMAT']},
'assetpercent':{
'key':['UNIQUEID','ASSETCLASS'],
'mandatory':['PERCENT'],
'optional':[]},
'position':{
'key':['ACCTID','DTASOF','UNIQUEID'],
'mandatory':['UNITS'],
'optional':['UNITPRICE','MKTVAL','DTPRICEASOF']},
'transaction':{
'key':['FITID', 'DTTRADE'],
'mandatory':['ACCTID'],
'optional':['DTSETTLE', 'UNIQUEID','SECNAME',
'tran', 'TRNTYPE',
'units', 'UNITPRICE','CURRENCY',
'TOTAL', 'FEES', 'TAXES',
'mEMO']
},
'checking':{
'key':['FITID','DTPOSTED'],
'mandatory':['ACCTID'],
'optional':['NAME','PAYEEID','TRNTYPE',
'TRNAMT', 'mEMO']
},
# the following are "statement" objects
# the tags mKTVAL, mktval and moneymrkt can be computed by web server
'groupdayasof':{
'key':['accountGroup','dayasof'],
'mandatory':[],
'optional':['+mKTVAL']},
'accountdtasof':{
'key':['ACCTID','DTASOF'],
'mandatory':[],
'optional':['AVAILCASH','BALAMT','bALAMT','+mktval','+moneymrkt']},
}
| Python |
from get import *
def transactions(acctids,dtstart,dtend):
    """Return transactions in the date range, plus the assets they mention."""
    collected = {}
    gettransactions(collected, acctids, dtstart, dtend)
    # pull in asset details for every security referenced by a transaction
    ids = set(o['UNIQUEID'][0]
              for o in collected.itervalues()
              if 'UNIQUEID' in o)
    getassets(collected, ids)
    return collected.values()
| Python |
from get import *
def positions(acctids,dtstart,dtend):
    """Return positions in the date range, plus the assets they refer to."""
    collected = {}
    getpositions(collected, acctids, dtstart, dtend)
    # pull in asset details for every security held
    ids = set(o['UNIQUEID'][0] for o in collected.itervalues())
    getassets(collected, ids)
    return collected.values()
def positionspercents(acctids,dtstart,dtend):
    """Return positions and break down positions according to percentage in each ASSETCLASS.
    Each position object has an additional field ASSETCLASS.

    NOTE(review): the returned list also contains the asset objects
    themselves (assets.values()) - confirm callers rely on this.
    """
    positions={}
    getpositions(positions,acctids,dtstart,dtend)
    # add information on assets
    uniqueids=set(o['UNIQUEID'][0] for o in positions.itervalues()) # get all uniqueids in use
    assets={}
    getassetspercents(assets,uniqueids)
    newitems=[]
    for key,p in positions.iteritems():
        uniqueid=p['UNIQUEID'][0]
        # NOTE(review): assumes every position carries MKTVAL - verify
        mktval = p['MKTVAL'][0]
        if mktval<1.: continue  # skip dust positions (< 1 currency unit)
        a = assets[uniqueid]
        if 'percent' in a:
            # split the market value proportionally between asset classes
            undefinedmktval=mktval
            for assetclass,percent in a['percent'].iteritems():
                newp = p.copy()
                newp['label']=key+assetclass
                newp['ASSETCLASS']=[assetclass]
                newmktval=mktval*float(percent)/100.
                newp['MKTVAL']=[str(newmktval)]
                if newmktval>1.:
                    newitems.append(newp)
                undefinedmktval-=newmktval
            # anything left unclassified keeps the original object
            if undefinedmktval>1.:
                p['MKTVAL'][0]=str(undefinedmktval)
                newitems.append(p)
        elif 'assetClass' in a:
            p['ASSETCLASS']=a['assetClass']
            newitems.append(p)
        else:
            newitems.append(p)
    return assets.values()+newitems
from get import *
def checking(acctids,dtstart,dtend):
    """Return checking-account entries for the accounts in the date range."""
    collected = {}
    getchecking(collected, acctids, dtstart, dtend)
    return collected.values()
| Python |
__version__ = "$Id$"
import os
import threading
import random
import urllib2
import cherrypy
from mako.template import Template
from mako.lookup import TemplateLookup
from urllib import quote_plus
from web.webbase import *
from web.position import positions,positionspercents
from web.asset import asset
from web.account import accounts,accounttotals
from web.transaction import transactions
from web.checking import checking
from cherrypy.lib.static import serve_file
from optparse import OptionParser
import logging
#Crazy line needed for OS X 10.6
# (installs a default urllib2 opener before any library triggers a fetch)
urllib2.install_opener(urllib2.build_opener())
# directory of this script; used to resolve the static asset paths in main()
current_dir = os.path.dirname(os.path.abspath(__file__))
class Main(object):
    """CherryPy application root.

    Each page handler registers its query parameters as a message via
    jmsg() and renders a Mako template; the template then fetches its data
    as JSON from fetch_json?jmsg=<msgkey>.
    """
    def __init__(self):
        self.lock=threading.Lock()  # guards msgs and lastFetch
        self.msgs={}                # msgkey -> message dict (params, 'func', cached 'items')
        self.lastFetch=None         # msgkey of the most recent fetch, aliased as 'last'
    def jmsg(self,func,**msg):
        """msgs communicate url parameters between the method that fetches the web page
        and fetch_json that generates the content for that web page:
        the url parameters are kept in a dictionary (msg) that also holds the key 'func' which gives
        a pointer to a function that accepts msg as single parameter and returns the JSON response.
        each msg is held under a deterministic key which is also given to the web page.
        the web page, using its Mako template, pulls the JSON information with fetch_json?jmsg=<msgkey>"""
        msg['func']=func
        # instead of using a random msgkey str(random.getrandbits(30))
        # use a deterministic hash of msg which is assumed to have values that are either str or [str]
        msgkey=()
        keys=msg.keys()
        keys.sort()
        for key in keys:
            value = msg[key]
            if isinstance(value,list):
                value.sort()
                value=tuple(value)
            msgkey+=(key,value)
        msgkey=str(abs(hash(msgkey)))
        # FIX: `with` releases the lock even if registration raises
        with self.lock:
            if msgkey not in self.msgs:
                self.msgs[msgkey]=msg
        return msgkey
    @cherrypy.expose
    def fetch_json(self,jmsg):
        """Return the JSON payload for a registered message key ('last'
        reuses the most recently fetched one). Results are cached in the
        message so refetches are cheap."""
        # FIX: the manual acquire()/release() pair leaked the lock when
        # func(**args) raised, deadlocking every subsequent request.
        with self.lock:
            if jmsg=='last':
                jmsg=self.lastFetch
            assert(jmsg in self.msgs)
            msg=self.msgs[jmsg]
            #del self.msgs[jmsg] # each msg can be used once
            if 'items' in msg:
                items=msg['items']
            else:
                func=msg['func']
                args=msg.copy()
                del args['func']
                # handlers expect sets: wrap scalars, drop empty strings
                for k,v in args.iteritems():
                    if isinstance(v,basestring):
                        args[k]=set([v]) if v else set()
                    else:
                        args[k]=set(v)
                items = func(**args)
                msg['items']=items
            self.lastFetch=jmsg
        return dumps({'items':items})
    @cherrypy.expose
    def index(self):
        """Front page; links to the Sesame workbench for this repository."""
        url=cherrypy.config.get('sesame.url')
        repository=cherrypy.config.get('sesame.repository')
        url+='/openrdf-workbench/repositories/'+repository
        t=lookup.get_template('index.html')
        return t.render(url=url,config=cherrypy.config)
    @cherrypy.expose
    def transactions(self,DTSTART,DTEND,ACCTID):
        t=lookup.get_template('transactions.html')
        return t.render(jmsg=self.jmsg(transactions,acctids=ACCTID,
                                       dtstart=DTSTART,dtend=DTEND),
                        config=cherrypy.config)
    @cherrypy.expose
    def checking(self,DTSTART,DTEND,ACCTID):
        t=lookup.get_template('checking.html')
        return t.render(jmsg=self.jmsg(checking,acctids=ACCTID,
                                       dtstart=DTSTART,dtend=DTEND),
                        config=cherrypy.config)
    @cherrypy.expose
    def positions(self,DTSTART,DTEND,ACCTID):
        t=lookup.get_template('positions.html')
        return t.render(jmsg=self.jmsg(positions,acctids=ACCTID,
                                       dtstart=DTSTART,dtend=DTEND),
                        config=cherrypy.config)
    @cherrypy.expose
    def positionspercents(self,DTSTART,DTEND,ACCTID):
        t=lookup.get_template('positionspercents.html')
        return t.render(jmsg=self.jmsg(positionspercents,acctids=ACCTID,
                                       dtstart=DTSTART,dtend=DTEND),
                        config=cherrypy.config)
    @cherrypy.expose
    def accounttotals(self,DTSTART,DTEND,ACCTID):
        t=lookup.get_template('accounttotals.html')
        return t.render(jmsg=self.jmsg(accounttotals,acctids=ACCTID,
                                       dtstart=DTSTART,dtend=DTEND),
                        config=cherrypy.config)
    @cherrypy.expose
    def accounts(self):
        t=lookup.get_template('accounts.html')
        return t.render(jmsg=self.jmsg(accounts),config=cherrypy.config)
    @cherrypy.expose
    def asset(self,UNIQUEID):
        t=lookup.get_template('asset.html')
        return t.render(jmsg=self.jmsg(asset,uniqueids=UNIQUEID),
                        config=cherrypy.config)
    @cherrypy.expose
    def item(self):
        """Render the generic item view over the last fetched message."""
        t=lookup.get_template('item.html')
        return t.render(jmsg='last',config=cherrypy.config)
def main():
    """Parse command-line options, configure logging and the static file
    routes, then start the CherryPy server with Main() as the root."""
    parser = OptionParser(usage="usage: %prog [options]",
                          description="Webserver front-end for 3account",
                          version=__version__)
    parser.add_option("-v", "--verbose",
                      action="store_true", dest="verbose")
    (options, args) = parser.parse_args()
    # -v raises both application and cherrypy logging to INFO
    if options.verbose:
        logging.basicConfig(level=logging.INFO)
        cherrypy.log.error_log.setLevel(logging.INFO)
        cherrypy.log.access_log.setLevel(logging.INFO)
    else:
        logging.basicConfig(level=logging.WARNING)
        cherrypy.log.error_log.setLevel(logging.WARNING)
        cherrypy.log.access_log.setLevel(logging.WARNING)
    # static assets are served straight from disk
    conf = {'/css': {'tools.staticdir.on': True,
                     'tools.staticdir.dir': os.path.join(current_dir, 'static','css')},
            '/js': {'tools.staticdir.on': True,
                    'tools.staticdir.dir': os.path.join(current_dir, 'static','js')},
            '/__history__.html': {'tools.staticfile.on': True, # for Back and Forward buttons in the browser works as undo and redo
                                  'tools.staticfile.filename':
                                  os.path.join(current_dir, 'static','__history__.html')},
            }
    cherrypy.quickstart(Main(),'/',config=conf)
if __name__ == '__main__':
    main()
| Python |
# Load files into local sesame server
# run "load3account.py -h" for more info
__version__ = "$Id$"
import os
import sys
import logging
import StringIO
import ConfigParser
from optparse import OptionParser
from httplib import BadStatusLine
import rdflib
from rdflib import plugin
# Register the rdfextras SPARQL processor/result classes with rdflib
# (required for SPARQL support with rdflib 3.x).
plugin.register(
    'sparql', rdflib.query.Processor,
    'rdfextras.sparql.processor', 'Processor')
plugin.register(
    'sparql', rdflib.query.Result,
    'rdfextras.sparql.query', 'SPARQLQueryResult')
from rdflib.graph import ConjunctiveGraph
from semprog import pysesame
from imports.loadRDF import loadGraph2Server, loadFile2Server
from imports import ofx2n3
from imports import bll_portfolio_position
from imports import bll_portfolio_activity
from imports import bll_checking
from imports import citina_portfolio_position, citina_portfolio_activity
from imports import pdf1
from imports import ofx2flt
from imports import flt2n3
from imports import directnet_checking
# Map each --format value (and recognised file extension) to the importer
# module that converts that source into flat N3 lines.
bank_modules={
    'ofx':ofx2flt,   # OFX v1 statements via the flat-line path
    'OFX':ofx2flt,
    'qfx':ofx2flt,   # Quicken-flavoured OFX
    'QFX':ofx2flt,
    'ofx2rdf':ofx2n3,  # direct OFX -> N3 conversion
    'bllpp':bll_portfolio_position,  # Bank Leumi portfolio position (Excel)
    'bllpt':bll_portfolio_activity,  # Bank Leumi portfolio activity
    'bllchck':bll_checking,          # Bank Leumi checking
    'citipp':citina_portfolio_position,  # presumably Citibank NA portfolio position
    'citipt':citina_portfolio_activity,  # presumably Citibank NA portfolio activity
    'pdf1':pdf1,
    'flt':flt2n3,    # already-flat lines
    'dc':directnet_checking,  # DirectNet checking
    }
usage = "usage: %prog [options] [input-file]*"
description="""load bank statement files into Sesame server.
The format of the bank statement is determined by the '-f' ('--format') option
or, if not given, by the input file's extension. To get more help on a specific
format type "%prog -f <format> help".
The following formats are currently supported:
RDF file, not just banking
('xml' for RDF/XML, 'notation3', 'trix', 'n3', 'nt', 'rdfa'),
OFX v1.0.2 ('ofx', 'qfx'),
Bank Leumi portfolio position in Excel file ('bllpp')
"""
parser = OptionParser(usage=usage,
description=description,
version=__version__)
parser.add_option("-u", "--url",
dest="url",default="http://localhost:8080/openrdf-sesame",
help="""Send output to Sesame server URL or a file-name.
Default http://localhost:8080/openrdf-sesame""")
parser.add_option("-f", "--format", dest="format",default=None,
help="""Input format. For help type "-f <fmt> help".
By default use file extension.
Supported values:
'xml' (for RDF/XML), 'notation3', 'trix', 'n3', 'nt', 'rdfa', 'owl'
"""+", ".join(bank_modules.keys())
)
parser.add_option("-S", "--config", dest="cfg",default=None,
help="Read/Write hash convertion from a config file")
parser.add_option("-s", "--sha1",
action="store_true", dest="hashing",
help="Hash senstive fields")
parser.add_option("-x", "--extend", dest="ext",default=None,
help="""Supply one or more tag=value pairs to be used in the
import process. For example -x ACCTID=A1, -x DTASOF=20091121 or
-x "ACCTID=A1 DTASOF=20091121"
""")
parser.add_option("-v", "--verbose",
action="store_true", dest="verbose")
parser.add_option("-q", "--quiet",
action="store_false", dest="verbose")
(options, args) = parser.parse_args()
if options.verbose:
    logging.basicConfig(level=logging.INFO)
else:
    logging.basicConfig(level=logging.WARNING)
# -S: persist the hash-conversion table between runs via a config file;
# options.hashing then carries the parser object instead of a boolean
if options.cfg:
    options.hashing=ConfigParser.RawConfigParser()
    options.hashing.optionxform = str # options are case sensitive
    options.hashing.read(options.cfg)
# parse the flt line extension to be added to each line.
if options.ext:
    fltparams=flt2n3.parse(options.ext)
else:
    fltparams=None
for fname in args:
logging.info("Loading "+fname+" to "+options.url)
try:
basename,ext = os.path.splitext(os.path.basename(fname))
if ext.startswith('.'): ext=ext[1:]
if options.format:
format = options.format
else:
format = ext
if format in ['xml', 'notation3', 'trix', 'n3', 'nt', 'rdfa', 'owl']:
if fname=="help":
print >>sys.stderr,"Any RDF file"
continue
if format=='owl':
format = 'xml'
if not options.url.startswith("http://"):
pass
else:
loadFile2Server(fname,options.url,format,basename)
elif format in bank_modules:
module=bank_modules[format]
module.fltparams=fltparams
if fname=="help":
if hasattr(module, 'help'):
print >>sys.stderr,module.help
else:
print >>sys.stderr,module.__doc__
continue
try:
if hasattr(module, 'pre') and callable(module.pre):
tmpfile=StringIO.StringIO()
logging.info('%s','converting to temp file in memory')
module.pre(fname,tmpfile,options.hashing)
tmpfile.seek(0)
fname=tmpfile
outf = StringIO.StringIO()
module.flat(fname,outf,"/"+basename,options.hashing)
except IOError, e:
logging.error("I/O Error converting %s to flat N3: %s" % (fname, e))
except Exception, e:
logging.error("I/O Error converting %s to flat N3: %s" % (fname, e))
except:
logging.error("Error converting %s to flat N3" % (fname))
else:
outf.seek(0)
if options.url == "null":
pass
elif not options.url.startswith("http://"):
fp = open(options.url,"w")
fp.write(outf.read())
fp.close
else:
loadFile2Server(outf,options.url,"n3",basename)
else:
logging.error("Unknwon file extension "+ext)
except IOError, e:
if hasattr(e, 'reason'):
logging.error('We failed to reach a server.')
logging.error('Reason: '+str(e.reason))
elif hasattr(e, 'code'):
if e.code != 204:
logging.error('The server couldn\'t fulfill the request.')
logging.error('Error code: %s'%e.code)
except BadStatusLine, e:
logging.error("Bad status line"+e.line)
except Exception , e:
if options.verbose:
logging.error("Exception %s"%e)
else:
logging.error("Exception")
# Persist any hash-conversion entries accumulated during this run.
if options.cfg:
    with open(options.cfg, 'wb') as cfgFp: options.hashing.write(cfgFp)
| Python |
########################################################################
# Copyright (C) 2007,8,9 Ehud Ben-Reuven
# udi@benreuven.com
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation version 2.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
########################################################################
""" ofxml2flat OFX-file flat-file
convert XML OFX file to flat lines.
"""
# TODO support INVOOLIST
from xml.dom.minidom import parse
import sys
from n3tagvalue import stagvalue,n3header,n3secid
import re
fout =sys.stdout # Where to write to. Modified by flat
hashing=False # When true, ACCTID values are hashed by stagvalue (see pacctid). Set by flat().
def p(pnode,pre=''):
    """Debug utility: recursively dump the DOM subtree under *pnode*.

    Text nodes are printed with the *pre* prefix; element nodes print their
    tag name and recurse with a '+' added to the prefix per nesting level.
    Output goes to stdout, not to the module-level `fout`.
    """
    #print >>fout,"%s:%s\n" % (pre,pnode.nodeType)
    nodelist=pnode.childNodes
    for node in nodelist:
        if node.nodeType == node.TEXT_NODE:
            print "%s%s\n" % (pre,node.data.strip())
        elif node.nodeType == node.ELEMENT_NODE:
            print "%s%s:\n" % (pre,node.tagName)
            p(node,'+'+pre)
def get(s,tag,mandatory=False):
    """Return the stripped text content of the first element named *tag*
    (case-insensitive, surrounding whitespace in the tag name ignored)
    under DOM node *s*, or None when the tag is absent/empty.

    Raises Exception when the tag is missing and *mandatory* is true.
    """
    TAG=tag.upper().strip()
    try:
        VAL=s.getElementsByTagName(TAG)[0].firstChild.data.strip()
    # IndexError: no element with that tag; AttributeError: element has no
    # text child (firstChild is None).  Previously a bare `except:`, which
    # also swallowed KeyboardInterrupt/SystemExit.
    except (IndexError, AttributeError):
        if mandatory:
            raise Exception('Missing mandatory tag %s'%tag)
        return
    return VAL
def sprt_tag(s,tag,mandatory=True,scale=1,ntag=None):
    """Fetch *tag* from node *s* and render it as an N3 tag/value string.

    The output tag is *ntag* (if given) or *tag*, upper-cased.  When
    *scale* differs from 1 the value is treated as a float and scaled.
    Returns None when the tag has no value.
    """
    raw = get(s, tag, mandatory)
    out_tag = (ntag or tag).upper()
    if not raw:
        return None
    if scale != 1:
        return stagvalue(out_tag, "%f" % (float(raw) * scale))
    return stagvalue(out_tag, raw)
def prt_tag(s,tags,mandatory=True,ntag=None,scale=1,scale_assetclass=None):
    """Print one N3 tag/value pair per tag in *tags* (string or list) to fout.

    scale_assetclass adjusts *scale* for asset-class price conventions:
    DEBT prices are divided by 100, OPT prices multiplied by 100
    (apparently per-100-par / per-contract quoting -- see the TODO in flat()).
    """
    mandatory=False # TODO, remove this!!!
    if isinstance(tags,basestring):
        tags=[tags]
    if scale_assetclass in ['DEBT']:
        scale=scale/100.0
    if scale_assetclass in ['OPT']:
        scale=scale*100.0
    for tag in tags:
        v=sprt_tag(s,tag,mandatory=mandatory,scale=scale,ntag=ntag)
        if v:
            print >>fout,v,
def prt_secid(s,acctid=None):
    """Print the security id (UNIQUEIDTYPE/UNIQUEID pair) of node *s* to fout,
    followed by the account id when given.  Both tags are required; a
    missing tag raises IndexError/AttributeError."""
    uniqueid=s.getElementsByTagName('UNIQUEID')[0].firstChild.data.strip()
    uniqueidtype=s.getElementsByTagName('UNIQUEIDTYPE')[0].firstChild.data.strip()
    print >>fout,n3secid(uniqueidtype,uniqueid),
    pacctid(acctid)
def pacctid(acctid):
    """Print an ACCTID tag/value to fout (hashed when the module-level
    `hashing` flag is set); no-op for a falsy acctid."""
    if acctid:
        print >>fout,stagvalue('ACCTID',acctid,hashing=hashing),
def prt_cashacc(acctid,currency,p=None,accttype='CHECKING'):
    """Print the cash-account part of a flat line: a CHECKING secid in
    *currency*, the account id, and the account type.

    When node *p* is given the account type is taken from its SUBACCTFUND
    tag, falling back to ACCTTYPE.
    """
    if p:
        try:
            accttype=get(p,'SUBACCTFUND',mandatory=True)
        # NOTE(review): bare except -- intended to catch get()'s
        # missing-mandatory-tag Exception, but catches everything.
        except:
            accttype=get(p,'ACCTTYPE',mandatory=True)
    print >>fout,n3secid('CHECKING',currency),
    pacctid(acctid)
    print >>fout,stagvalue("ACCTTYPE",accttype),
    if accttype in ['CHECKING', 'CASH']:
        print >>fout,stagvalue('ASSETCLASS','CHECKING'),
def flat(fin,_fout,context=None,_hashing=False):
    """Convert an OFX XML document to flat a3:flatLine N3 output.

    fin      -- file name or open file object with the OFX XML input
    _fout    -- writable file object; stored in the module global `fout`
    context  -- context string passed through to n3header
    _hashing -- stored in the module global `hashing`; when true, account
                ids are hashed by stagvalue (see pacctid)

    Sections emitted, in order: SECLIST asset definitions, credit-card
    statements (CCSTMTTRNRS), checking statements (STMTTRNRS) and
    investment statements (INVSTMTTRNRS) with their transactions.
    """
    global fout,hashing
    fout=_fout
    hashing=_hashing
    if isinstance(fin,str):
        fin=open(fin)
    else:
        fin.seek(0)
    n3header(fout,"$Id$"[1:-1],context)
    dom = parse(fin) # parse an XML file by name
    # Start with information on assets. In the examples I have of OFX files, this information
    # appeared in the end of the file, but it should be processed first so later on when processing
    # holdings it will be easier to locate the relevant asset.
    seclist=dom.getElementsByTagName('SECLIST')
    if seclist:
        for s in seclist[0].childNodes:
            # NOTE(review): the "a3:flatLine [" opener is printed before the
            # nodeType check below; a skipped non-element node leaves an
            # unclosed "[" in the output -- verify against real input.
            print >>fout,"a3:flatLine [ ",
            if s.nodeType != s.ELEMENT_NODE:
                continue # Skip comments
            prt_secid(s)
            ticker=get(s,'TICKER')
            if ticker:
                ticker=ticker.split()[0] # Some banks put their FIID after the ticker
                print >>fout,stagvalue('TICKER',ticker),
                v=get(s,'FIID')
                if v:
                    print >>fout,stagvalue('FIID',v),
                    v = get(dom,'BROKERID')
                    if v:
                        print >>fout,stagvalue('BROKERID',v)
                    else:
                        v=get(dom,'ORG')
                        print 'FIID found but cant find BROKERID, using ORG instead'
                        if v:
                            print >>fout,stagvalue('ORG',v)
            if get(s,'YIELD'):
                if get(s,'DTYIELDASOF')!=get(s,'DTASOF'):
                    raise Exception("price and yield dates dont match")
            prt_tag(s,['SECNAME','DTASOF'])
            # Element tag names look like STOCKINFO/DEBTINFO/...; dropping
            # the 'INFO' suffix yields the asset class.
            assetclass=s.tagName[:-4]
            print >>fout,stagvalue("ASSETCLASS",assetclass),
            prt_tag(s,'UNITPRICE',scale_assetclass=assetclass) # TODO check if it is better to scale the UNITS instead (looks like DEBT->UNITPRICE OPT->UNITS)
            prt_tag(s,['ASSETCLASS','PARVALUE','DEBTTYPE','DEBTCLASS','DTMAT',
                       'COUPONRT','COUPONFREQ','MFTYPE','STOCKTYPE','YIELD'],
                    mandatory=False)
            print >>fout,"];"
    # Credit card accounts
    stmttrnrs=dom.getElementsByTagName('CCSTMTTRNRS')
    if stmttrnrs:
        for stmtrs in stmttrnrs:
            print >>fout,"a3:flatLine [ ",
            acctid=get(stmtrs,'ACCTID')
            curdef=get(stmtrs,'CURDEF')
            # bind together all the information about the cash holding.
            prt_cashacc(acctid,curdef,accttype='CREDITCRD')
            # info on the account
            # bind together all the information about the account.
            # The DTSTART, DTEND are bounded to the unique pair <ACCTID,FILE> but the FILE is not
            # specified because it is implicitly assumed everywhere in the line file
            prt_tag(stmtrs,['CURDEF',
                #'DTSTART','DTEND' # FIXME there are too many transaction outside range or the range is too big to be full
                ])
            prt_tag(dom,['ORG','FID'],mandatory=False)
            # info on the cash holding
            l=stmtrs.getElementsByTagName('LEDGERBAL')[0]
            prt_tag(l,'DTASOF')
            prt_tag(l,'BALAMT',ntag='UNITS') # Ignore AVAILBAL
            print >>fout,"];"
            # Generate information on all transactions
            for t in stmtrs.getElementsByTagName('STMTTRN'):
                print >>fout,"a3:flatLine [ ",
                # bind all information on transaction with the cash holding.
                prt_cashacc(acctid,curdef,accttype='CREDITCRD')
                prt_tag(t,['TRNTYPE','FITID'])
                prt_tag(t,'TRNAMT',ntag='UNITS')
                prt_tag(t,'DTPOSTED',ntag='DTSETTLE')
                prt_tag(t,'DTAVAIL',mandatory=False)
                prt_tag(t,['DTUSER','CHECKNUM','REFNUM','NAME','MEMO'],mandatory=False)
                print >>fout,"];"
    # Checking accounts
    stmttrnrs=dom.getElementsByTagName('STMTTRNRS')
    if stmttrnrs:
        for stmtrs in stmttrnrs:
            print >>fout,"a3:flatLine [ ",
            acctid=get(stmtrs,'ACCTID')
            curdef=get(stmtrs,'CURDEF')
            # bind together all the information about the cash holding.
            prt_cashacc(acctid,curdef,stmtrs)
            # info on the account
            # bind together all the information about the account.
            # The DTSTART, DTEND are bounded to the unique pair <ACCTID,FILE> but the FILE is not
            # specified because it is implicitly assumed everywhere in the line file
            prt_tag(stmtrs,['CURDEF',
                #'DTSTART','DTEND' # FIXME check if/when this can be done
                ])
            prt_tag(dom,['ORG','FID'],mandatory=False)
            # info on the cash holding
            l=stmtrs.getElementsByTagName('LEDGERBAL')[0]
            prt_tag(l,'DTASOF')
            prt_tag(l,'BALAMT',ntag='UNITS') # Ignore AVAILBAL
            print >>fout,"];"
            # Generate information on all transactions
            for t in stmtrs.getElementsByTagName('STMTTRN'):
                print >>fout,"a3:flatLine [ ",
                # bind all information on transaction with the cash holding.
                prt_cashacc(acctid,curdef,stmtrs)
                prt_tag(t,['TRNTYPE','FITID'])
                prt_tag(t,'TRNAMT',ntag='UNITS')
                prt_tag(t,'DTPOSTED',ntag='DTSETTLE')
                prt_tag(t,'DTAVAIL',mandatory=False)
                prt_tag(t,['DTUSER','CHECKNUM','REFNUM','NAME','MEMO'],mandatory=False)
                print >>fout,"];"
    # Investment accounts
    invstmttrnrs=dom.getElementsByTagName('INVSTMTTRNRS')
    if invstmttrnrs:
        for invstmtrs in invstmttrnrs:
            print >>fout,"a3:flatLine [ ",
            # Every line should show the ACCTID
            acctid=get(invstmtrs,'ACCTID')
            curdef=get(invstmtrs,'CURDEF')
            # bind together all the information about the account.
            # The DTSTART, DTEND are bounded to the unique pair <ACCTID,FILE> but the FILE is not
            # specified because it is implicitly assumed everywhere in the line file
            prt_tag(invstmtrs,['CURDEF','ACCTID',
                #'DTSTART','DTEND' # Fixme check when it can be done
                ])
            prt_tag(dom,['ORG','FID'],mandatory=False)
            print >>fout,"];"
            # generate statement line for CASH account
            print >>fout,"a3:flatLine [ ",
            prt_cashacc(acctid,curdef,accttype='CASH') # Make this match the CASH accounts used in investment transactions
            prt_tag(invstmtrs,'AVAILCASH',ntag='UNITS')
            prt_tag(invstmtrs,'DTASOF')
            print >>fout,"];"
            # Dump current portfolio of the account
            for p in invstmtrs.getElementsByTagName('INVPOS'):
                print >>fout,"a3:flatLine [ ",
                prt_secid(p,acctid)
                prt_tag(p,'DTPRICEASOF',ntag='DTASOF')
                # parent tag looks like POSSTOCK/POSDEBT/...; drop 'POS'
                assetclass=p.parentNode.tagName[3:]
                print >>fout,stagvalue("ASSETCLASS",assetclass),
                prt_tag(p,'UNITPRICE',scale_assetclass=assetclass)
                prt_tag(p,['POSTYPE','UNITS','MKTVAL'])
                prt_tag(p,'MEMO',ntag='POSMEMO',mandatory=False) # POSMEMO in order not to confuse with transaction's MEMO
                print >>fout,"];"
            # Dump transactions
            for trn in ['INVBUY','INVSELL']:
                for p in invstmtrs.getElementsByTagName(trn):
                    print >>fout,"a3:flatLine [ ",
                    prt_secid(p,acctid)
                    prt_tag(p,['FITID','DTTRADE','DTSETTLE','MEMO','UNITS','COMMISSION','FEES','TOTAL'])
                    if trn=='INVBUY':
                        print >>fout,stagvalue("TRNTYPE","BUY"),
                        # parent tag looks like BUYSTOCK -> strip 'BUY'
                        assetclass = p.parentNode.tagName[3:]
                    else:
                        print >>fout,stagvalue("TRNTYPE","SELL"),
                        # parent tag looks like SELLSTOCK -> strip 'SELL'
                        assetclass = p.parentNode.tagName[4:]
                    print >>fout,stagvalue("ASSETCLASS",assetclass),
                    prt_tag(p,'UNITPRICE',scale_assetclass=assetclass)
                    prt_tag(p,['MARKUP','MARKDOWN'],mandatory=False,
                            scale_assetclass=assetclass)
                    accrdint=get(p.parentNode,'ACCRDINT') # ACCRDINT is outside the INVBUY/SELL structure.
                    if accrdint:
                        print >>fout,stagvalue('ACCRDINT',accrdint),
                    print >>fout,"];"
                    # generate line for current account
                    print >>fout,"a3:flatLine [ ",
                    prt_cashacc(acctid,curdef,p)
                    prt_tag(p,'FITID',ntag='RELFITID')
                    prt_tag(p,'DTSETTLE')
                    prt_tag(p,'TOTAL',ntag='UNITS')
                    if trn=='INVBUY':
                        print >>fout,stagvalue('TRNTYPE','DEBIT'),
                    else:
                        print >>fout,stagvalue('TRNTYPE','CREDIT'),
                    print >>fout,"];"
                    # ACCRDINT is real money when you buy/sell a debit but it does not appear in TOTAL
                    if accrdint:
                        print >>fout,"a3:flatLine [ ",
                        # generate line for current account
                        prt_cashacc(acctid,curdef,p)
                        prt_tag(p,'FITID',ntag='RELFITID')
                        prt_tag(p,'DTSETTLE')
                        print >>fout,stagvalue('UNITS',accrdint),
                        if trn=='INVBUY':
                            print >>fout,stagvalue('TRNTYPE','DEBIT'),
                        else:
                            print >>fout,stagvalue('TRNTYPE','CREDIT'),
                        print >>fout,stagvalue('PAYEEID','ACCRDINT'), # The money is not coming from the Asset issuer but from the side selling/buying the asset to us.
                        print >>fout,"];"
            for p in invstmtrs.getElementsByTagName('INCOME'):
                print >>fout,"a3:flatLine [ ",
                prt_secid(p,acctid)
                prt_tag(p,['FITID','DTTRADE','DTSETTLE','MEMO','TOTAL'])
                prt_tag(p,'INCOMETYPE',ntag='TRNTYPE')
                print >>fout,"];"
                # generate line for current account
                print >>fout,"a3:flatLine [ ",
                prt_cashacc(acctid,curdef,p)
                prt_tag(p,'FITID',ntag='RELFITID')
                prt_tag(p,'DTSETTLE')
                prt_tag(p,'TOTAL',ntag='UNITS')
                print >>fout,stagvalue('TRNTYPE','CREDIT'),
                print >>fout,"];"
            for p in invstmtrs.getElementsByTagName('INVEXPENSE'):
                print >>fout,"a3:flatLine [ ",
                prt_secid(p,acctid)
                prt_tag(p,['FITID','DTTRADE','DTSETTLE','MEMO'])
                prt_tag(p,'TOTAL',scale=-1)
                # Classify the expense by keywords in the MEMO text.
                memo=get(p,'MEMO')
                if re.search(r"\bTAX\b",memo,re.IGNORECASE):
                    prt_tag(p,'TOTAL',ntag='TAXES')
                    print >>fout,stagvalue("TRNTYPE","TAX"),
                elif re.search(r"\bFEE\b",memo,re.IGNORECASE):
                    prt_tag(p,'TOTAL',ntag='FEES')
                    print >>fout,stagvalue("TRNTYPE","FEE"),
                else:
                    print "Unknown expense",memo
                    prt_tag(p,'TOTAL',ntag='COMMISSION')
                    print >>fout,stagvalue("TRNTYPE","EXPENSE"),
                print >>fout,"];"
                # generate line for current account
                print >>fout,"a3:flatLine [ ",
                prt_cashacc(acctid,curdef,p)
                prt_tag(p,'FITID',ntag='RELFITID')
                prt_tag(p,'DTSETTLE')
                prt_tag(p,'TOTAL',ntag='UNITS',scale=-1)
                print >>fout,stagvalue('TRNTYPE','DEBIT'),
                print >>fout,"];"
            for p in invstmtrs.getElementsByTagName('TRANSFER'):
                print >>fout,"a3:flatLine [ ",
                prt_secid(p,acctid)
                prt_tag(p,['FITID','DTTRADE','DTSETTLE','MEMO','UNITS'])
                prt_tag(p,'TFERACTION',ntag='TRNTYPE')
                print >>fout,"];"
                # note that TRANSFER does not have a SUBACCTFUND to balance with
            for p in invstmtrs.getElementsByTagName('INVBANKTRAN'):
                print >>fout,"a3:flatLine [ ",
                prt_cashacc(acctid,curdef,p)
                prt_tag(p,['TRNTYPE','FITID','NAME','MEMO'])
                prt_tag(p,'DTPOSTED',ntag='DTSETTLE')
                prt_tag(p,'TRNAMT',ntag='UNITS')
                print >>fout,"];"
    dom.unlink()
    print >>fout,"."
| Python |
# Load an RDF file into local sesame server
#import os
#import sys
#from optparse import OptionParser
#from httplib import BadStatusLine
from rdflib.graph import ConjunctiveGraph
from semprog import pysesame
__version__ = "$Id$"
def loadGraph2Server(g,url,fname=None):
    """Serialize graph *g* as RDF/XML and upload it to the Sesame server
    at *url*, into the '3account' repository, under context *fname*."""
    conn = pysesame.connection(url)
    conn.use_repository('3account')
    payload = g.serialize(format='xml')
    conn.putdata(payload, context=fname)
def loadFile2Server(fname,url,format,context=None):
    """Parse RDF file *fname* (name or open file object) in *format* and
    upload the resulting graph to the Sesame server at *url*.

    When *fname* is a file name and no *context* is given, the file's
    basename is used as the context.
    """
    # Bug fix: os.path.basename was used below but "import os" is commented
    # out at module top; import locally so the name resolves.
    import os
    g = ConjunctiveGraph()
    if isinstance(fname,str):
        fp = open(fname)
        if not context:
            context = os.path.basename(fname)
    else:
        fp=fname
    g.parse(fp,format=format,publicID=context)
    loadGraph2Server(g,url,"<file://%s>"%context)
| Python |
########################################################################
# Copyright (C) 2009 Ehud Ben-Reuven
# udi@benreuven.com
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation version 2.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
########################################################################
"""Convert BLL investment portfolio position XLS file to flat-OFX-RDF and
load into Sesame server.
"""
from csv2flt import *
import re
from xls2csv2 import xls2csv2
from n3tagvalue import *
def acctidre(name):
    """Regex fragment matching a BLL account id (digits, '-', '/') as the
    named group *name*."""
    return '(?P<' + name + r'>[\d\-\/]+)'
def secid(name):
    """Regex fragment matching a 6-7 digit TASE security id as the named
    group *name*."""
    return '(?P<' + name + r'>\d{6,7})'
# Header/line/footer templates for the XLS layout versions supported by
# flat().  Each entry is a (header_rows, line_template, footer_rows) tuple
# of per-cell regular expressions; readcsvtable_multiformat() picks the
# first version that matches.  The byte strings are Hebrew column headings
# (decoded by xls2csv2's 'hebrew' mode -- see pre()).
htf_list=[
    #version 0
    ([
    ['', '', '', '', '', '', '', '', '', '', ''],
    ['\xee\xe1\xe8 \xfa\xe9\xf7', '', '', '', '', '', '', '', '', '', ''], # In Hebrew: Portfolio view
    ['('+'\xfa\xe0\xf8\xe9\xea: '+eurdre('DTASOF','/')+')?', '', '', '', '', '',
     '', '', '', '('+'\xfa\xe0\xf8\xe9\xea: '+eurdre('DTASOF','/')+')?', ''],
    ['', '', '', '', '', '', '', '', '', '', ''],
    ['', '', '', '', '', '', '', '', '', '', ''],
    ['\xee\xf1\xf4\xf8 \xf0\xe9\xe9\xf8',
     '\xf9\xed \xf0\xe9\xe9\xf8',
     '\xe0\xf8\xe5\xf2 \xeb\xf1\xf4\xe9',
     '\xf9\xf2\xf8 \xf7\xf0\xe9\xe4 \xee\xee\xe5\xf6\xf2',
     '\xeb\xee\xe5\xfa \xe0\xe7\xe6\xf7\xe4',
     '\xf9\xe5\xe5\xe9 \xe0\xe7\xe6\xf7\xe4',
     '\xf9\xf2\xf8',
     '\xe0\xe7\xe5\xe6 \xf9\xe9\xf0\xe5\xe9 \xe9\xe5\xee\xe9',
     '\xf8\xe5\xe5\xe7 \xe1-%',
     '\xf8\xe5\xe5\xe7 \xe1\xf9\xe7',
     '\xf9\xed \xfa\xe9\xf7']
    ],
    [secid('SECID')+'.0',
     strre('SECNAME'),
     '(0\.0)|(\xec\xe0 \xf7\xe9\xe9\xed)',
     pvaluere('AVG_COST',1),
     pvaluere('UNITS',1),
     pvaluere('MKTVAL',1),
     pvaluere('UNITPRICE',1), # Surprisingly this can be in different currencies for different assets and is for 100 Units for bonds
     floatre('DAY_CHANGE_PERCENT'),
     floatre('CHANGE_PERCENT'),
     valuere('CGAIN_ILS',1),
     acctidre('ACCTID')],
    None),
    #version1
    ([
    ['', '', '', '', '', '', '', '', '', '', ''],
    ['\xee\xe1\xe8 \xe0\xe9\xf9\xe9 - \xe4\xe0\xe7\xe6\xf7\xe5\xfa \xf9\xec\xe9',
     '', '', '', '', '', '', '', '', '', ''], # In Hebrew: Portfolio view
    ['\xfa\xe0\xf8\xe9\xea: '+eurdre('DTASOF','/'), '',':\xfa\xe9\xf7',
     acctidre('ACCTID'), '', '', '', '', '', '', ''],
    ['', '', '', '', '', '', '', '', '', '', ''],
    ['', '', '', '', '', '', '', '', '', '', ''],
    ['\xee\xf1\xf4\xf8 \xf0\xe9\xe9\xf8',
     '\xf9\xed \xf0\xe9\xe9\xf8',
     '\xe0\xf8\xe5\xf2 \xeb\xf1\xf4\xe9',
     '\xf9\xf2\xf8 \xf7\xf0\xe9\xe4 \xee\xee\xe5\xf6\xf2',
     '\xeb\xee\xe5\xfa \xe0\xe7\xe6\xf7\xe4',
     '\xf9\xe5\xe5\xe9 \xe0\xe7\xe6\xf7\xe4',
     '% \xee\xe4\xfa\xe9\xf7', '\xf9\xf2\xf8',
     '% \xf9\xe9\xf0\xe5\xe9 \xe9\xe5\xee\xe9',
     '\xf8\xe5\xe5\xe7 \xe1-%',
     "\xf8\xe5\xe5\xe7 \xe1\xf9''\xe7"]
    ],
    [secid('SECID')+'.0',
     strre('SECNAME'),
     '(0\.0)|((\xec\xe0 )?\xf7\xe9\xe9\xed)',
     pvaluere('AVG_COST',1),
     pvaluere('UNITS',1),
     pvaluere('MKTVAL',1),
     pvaluere('PART_OF_PORTFOLIO',1),
     pvaluere('UNITPRICE',1), # Surprisingly this can be in different currencies for different assets and is for 100 Units for bonds
     floatre('DAY_CHANGE_PERCENT'),
     floatre('CHANGE_PERCENT'),
     valuere('CGAIN_ILS',1),
     ],
    None),
    #version2
    ([
    ['', '', '', '', '', '', '', '', '', '', '', ''],
    ['\xee\xe1\xe8 \xe0\xe9\xf9\xe9 - \xe4\xe0\xe7\xe6\xf7\xe5\xfa \xf9\xec\xe9',
     '', '', '', '', '', '', '', '', '', '', ''], # In Hebrew: Portfolio view
    ['\xfa\xe0\xf8\xe9\xea: '+eurdre('DTASOF','/'), '',':\xfa\xe9\xf7',
     acctidre('ACCTID'), '', '', '', '', '', '', '', ''],
    ['', '', '', '', '', '', '', '', '', '', '', ''],
    ['', '', '', '', '', '', '', '', '', '', '', ''],
    ['\xee\xf1\xf4\xf8 \xf0\xe9\xe9\xf8',
     '\xf9\xed \xf0\xe9\xe9\xf8',
     '\xe0\xf8\xe5\xf2 \xeb\xf1\xf4\xe9',
     '\xf9\xf2\xf8 \xf7\xf0\xe9\xe4 \xee\xee\xe5\xf6\xf2',
     '\xeb\xee\xe5\xfa \xe0\xe7\xe6\xf7\xe4',
     '\xf9\xe5\xe5\xe9 \xe0\xe7\xe6\xf7\xe4',
     '% \xee\xe4\xfa\xe9\xf7', '\xf9\xf2\xf8',
     '% \xf9\xe9\xf0\xe5\xe9 \xe9\xe5\xee\xe9',
     '\xf8\xe5\xe5\xe7 \xe1-%',
     "\xf8\xe5\xe5\xe7 \xe1\xf9''\xe7", '\xee\xf1 \xe4\xfa\xe9\xf7']
    ],
    [secid('SECID')+'.0',
     strre('SECNAME'),
     '(0\.0)|((\xec\xe0 )?\xf7\xe9\xe9\xed)',
     pvaluere('AVG_COST',1),
     pvaluere('UNITS',1),
     pvaluere('MKTVAL',1),
     pvaluere('PART_OF_PORTFOLIO',1),
     pvaluere('UNITPRICE',1), # Surprisingly this can be in different currencies for different assets and is for 100 Units for bonds
     floatre('DAY_CHANGE_PERCENT'),
     floatre('CHANGE_PERCENT'),
     valuere('CGAIN_ILS',1),
     acctidre('ACCTID'),
     ],
    None),
    #version3
    ([
    ['', '', '', '', '', '', '', '', '', '', '', ''],
    ['\xee\xe1\xe8 \xe0\xe9\xf9\xe9 - \xe4\xe0\xe7\xe6\xf7\xe5\xfa \xf9\xec\xe9',
     '', '', '', '', '', '', '', '', '', '', ''], # In Hebrew: Portfolio view
    ['\xfa\xe0\xf8\xe9\xea: '+eurdre('DTASOF','/'), '',':\xfa\xe9\xf7',
     acctidre('ACCTID'), '', '', '', '', '', '', '', ''],
    ['', '', '', '', '', '', '', '', '', '', '', ''],
    ['', '', '', '', '', '', '', '', '', '', '', ''],
    ['\xee\xf1\xf4\xf8 \xf0\xe9\xe9\xf8',
     '\xf9\xed \xf0\xe9\xe9\xf8',
     '\xe0\xf8\xe5\xf2 \xeb\xf1\xf4\xe9',
     '\xf9\xf2\xf8 \xf7\xf0\xe9\xe4',
     '\xeb\xee\xe5\xfa',
     '\xf9\xe5\xe5\xe9',
     '% \xee\xe4\xfa\xe9\xf7', '\xf9\xf2\xf8',
     '% \xf9\xe9\xf0\xe5\xe9',
     '\xf8\xe5\xe5\xe7 \xe1-%',
     '\xf8\xe5\xe5\xe7',
     '\xee\xf1 \xe4\xfa\xe9\xf7'],
    ['', '', '', '\xee\xee\xe5\xf6\xf2', '\xe0\xe7\xe6\xf7\xe4',
     '\xe0\xe7\xe6\xf7\xe4', '', '', '\xe9\xe5\xee\xe9', '',
     '\xe1\xf9"\xe7', ''],
    ],
    [secid('SECID')+'.0',
     strre('SECNAME'),
     '(0\.0)|((\xec\xe0 )?\xf7\xe9\xe9\xed)',
     pvaluere('AVG_COST',1),
     pvaluere('UNITS',1),
     pvaluere('MKTVAL',1),
     pvaluere('PART_OF_PORTFOLIO',1),
     pvaluere('UNITPRICE',1), # Surprisingly this can be in different currencies for different assets and is for 100 Units for bonds
     floatre('DAY_CHANGE_PERCENT'),
     floatre('CHANGE_PERCENT'),
     valuere('CGAIN_ILS',1),
     acctidre('ACCTID'),
     ],
    None),
    ]
def pre(fname,csvfile,hashing=False):
    """Preprocess step: convert the XLS file *fname* to CSV at *csvfile*
    using xls2csv2's 'hebrew' mode.  *hashing* is accepted for interface
    uniformity with the other converters but is unused here."""
    xls2csv2(fname,csvfile,'hebrew')
def flat(csvfile,fout,context=None,hashing=False):
    """Flatten a BLL portfolio-positions CSV (as produced by pre()) into
    a3:flatLine N3 lines on *fout*.

    Raises Exception when no htf_list template version matches the file.
    Versions 1-3 carry the account id in the header; versions 0 and 2
    carry it per data row.
    """
    n3header(fout,"$Id$"[1:-1],context)
    h,t,f,version=readcsvtable_multiformat(csvfile,htf_list)
    if h==None:
        raise Exception("File does not match template")
    l=h[2]
    dtasof = "%s%s%s"%(l['DTASOF_year'],l['DTASOF_month'],l['DTASOF_day'])
    accttype="INVESTMENT"
    if version==1 or version==2 or version==3:
        acctid=h[2]['ACCTID']
    # NOTE(review): `fltparams` is not defined in this module; presumably
    # injected by `from csv2flt import *` -- verify.
    currate=fltparams.get('CURRATE',[]) if fltparams else []
    if currate:
        currate=float(currate[0])
    for l in t:
        print >>fout,"a3:flatLine [ ",
        units=float(l['UNITS'])
        # prices are quoted per 100 units, presumably the bond convention
        # noted in htf_list -- verify for non-bond assets.
        unitprice=float(l['UNITPRICE'])/100
        mktval=float(l['MKTVAL'])
        # sanity check: warn (to stdout) when the reported market value
        # disagrees with units*price by more than half a shekel.
        if abs(mktval-units*unitprice)>0.5:
            print mktval,units*unitprice
        print >>fout,stagvalue('DTASOF',dtasof),stagvalue('CURDEF','ILS'),
        print >>fout,n3secid('TASE',l['SECID']),
        print >>fout,stagvalue('SECNAME',l['SECNAME'],language="hebrew"),
        if version==0 or version==2:
            acctid=l['ACCTID']
        print >>fout,stagvalue('ACCTID',acctid,hashing=hashing),
        print >>fout,stagvalue('ACCTTYPE',accttype),
        print >>fout,stagvalue('UNITS',units),
        print >>fout,stagvalue('UNITPRICE',unitprice),
        if currate:
            print >>fout,stagvalue('CURRATE',currate)
            mktval/=currate
        print >>fout,stagvalue('MKTVAL',mktval),
        print >>fout,"];"
    print >>fout,"."
| Python |
########################################################################
# Copyright (C) 2007,8,9 Ehud Ben-Reuven
# udi@benreuven.com
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation version 2.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
########################################################################
"""Convert OFX v2.x or XML files to flat lines.
"""
help="""Convert OFX v2.x or XML files to flat lines.
"""
from ofx2xml import ofx2xml
import ofxml2flat
def flat(fin,fout,context=None,hashing=False):
    """Flatten an OFX v2.x/XML file: thin wrapper delegating to
    ofxml2flat.flat with the same arguments."""
    ofxml2flat.flat(fin,fout,context,hashing)
| Python |
########################################################################
# Copyright (C) 2007,8,9 Ehud Ben-Reuven
# udi@benreuven.com
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation version 2.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
########################################################################
"""Convert OFX v1.x or QFX files to flat lines.
This is done in two stages.
The first converts the OFX to N3 file that keeps the structure of the original
OFX file.
The second stage converts the N3 file to a graph and then unstructure it.
"""
help="""Convert OFX v1.x file (MS-Money or Quicken Web Connect) to
flat OFX-RDF (in memory) and load to Sesame server
"""
import logging
import ofx2n3
from rdflib.graph import *
from rdflib.namespace import *
from rdflib.term import *
# RDF namespaces used throughout this module.
OFX = Namespace("http://www.w3.org/2000/10/swap/pim/ofx#")  # OFX vocabulary
A3 = Namespace("http://code.google.com/p/3account/wiki/Schema#")  # 3account schema
XSD = Namespace("http://www.w3.org/2001/XMLSchema#")
FS = Namespace("file://")  # per-file context URIs
flatLine = A3["flatLine"]  # predicate linking a context to one flattened line
NS={'ofx':OFX, 'a3':A3}  # prefix bindings for the SPARQL queries below
def getSimplifiedName(uri):
    """Return the local name of *uri*: the part after the last '#' when one
    is present, otherwise the part after the last '/'."""
    if "#" in uri:
        return uri.rsplit("#", 1)[-1]
    return uri.rsplit("/", 1)[-1]
def pre(fname,fout,hashing=False):
    """Preprocess step: convert OFX v1 file *fname* to structured N3 on
    *fout* (delegates to ofx2n3.flat)."""
    ofx2n3.flat(fname,fout,hashing=hashing)
def cleanval(v):
    """Convert a SPARQL result value to a plain Literal.

    URIRefs inside one of the known namespaces (NS) are stripped down to
    their local name; other URIRefs become Literals of their full text;
    non-URIRef values pass through unchanged.
    """
    if not isinstance(v,URIRef):
        return v
    v=str(v)
    for ns in NS.itervalues():
        if v.startswith(str(ns)):
            return Literal(v[len(str(ns)):])
    return Literal(v)
def cleanvar(l):
    """Strip any prefix up to the last ':' and a single leading
    non-alphabetic sigil (e.g. '?' or '+') from *l*."""
    name = l.rsplit(":", 1)[-1]
    if not name[0].isalpha():
        name = name[1:]
    return name
def vars(lst):
    """Render the tag templates in *lst* as a SPARQL SELECT variable list
    ('?a ?b ...'), using cleanvar to derive each variable name."""
    names = []
    for entry in lst:
        assert isinstance(entry, str)
        names.append("?" + cleanvar(entry))
    return " ".join(names)
def pairs(x,lst):
    """Build SPARQL triple patterns binding subject variable *x* to one
    variable per tag template in *lst*.

    Template syntax: 'TAG' makes a required pattern, '?TAG' wraps it in
    OPTIONAL, 'A:B' nests via a blank node (A is used verbatim, the leaf
    is upper-cased for the predicate), and '+TAG' extends the previous
    pattern with an extra ';'-joined predicate.
    """
    out = []
    for tag in lst:
        assert isinstance(tag, str)
        if tag.startswith("+"):
            # extend the previously opened pattern (its head is at out[-2])
            name = tag[1:]
            out[-2] += " ; ofx:" + name.upper() + " ?" + name
            continue
        if tag.startswith("?"):
            head = " optional { ?" + x + " "
            tail = " } .\n"
            tag = tag[1:]
        else:
            head = "?" + x + " "
            tail = " .\n"
        while ":" in tag:
            prefix, tag = tag.split(":", 1)
            head += "ofx:" + prefix + " [ "
            tail = " ]" + tail
        head += "ofx:" + tag.upper() + " ?" + tag
        out.append(head)
        out.append(tail)
    return " ".join(out)
def bprt(f,bNode,lst,vals):
    """Add one triple per non-empty value in *vals* under blank node
    *bNode* in graph *f*; *lst* holds the matching tag templates.

    The predicate namespace is chosen by case: all-upper names go to OFX,
    others to A3.
    """
    for idx, template in enumerate(lst):
        val = vals[idx]
        if not val:
            continue
        name = cleanvar(template)
        pred = OFX[name] if name.isupper() else A3[name]
        f.add((bNode, pred, cleanval(val)))
def flatseclist(g,f,context):
    """Flatten the OFX SECLIST (security definitions) from graph *g* into
    one flatLine blank node per security in output graph *f*.

    One SPARQL query is run per security type (DEBT/MF/OPT/OTHER/STOCK),
    combining the common SECINFO fields with the type-specific ones.
    """
    secinfoT=["SECID:UNIQUEID","+UNIQUEIDTYPE","SECNAME",]
    secinfoO=["?TICKER","?FIID","?RATING","?UNITPRICE","?dtasof",
              "?CURRENCY:CURRATE","+CURSYM","?MEMO"]
    infoTs = [("DEBT",["PARVALUE",
                       "DEBTTYPE",
                       "?DEBTCLASS",
                       "?COUPONRT",
                       "?DTCOUPON","?COUPONFREQ",
                       "?CALLPRICE","?YIELDTOCALL","?DTCALL","?CALLTYPE",
                       "?YIELDTOMAT","?DTMAT","?ASSETCLASS","?FIASSETCLASS"
                       ]),
              ("MF",["?MFTYPE","?YIELD","?DTYIELDASOF",
                     "?MFASSETCLASS:PORTION:ASSETCLASS","+PERCENT",
                     "?FIMFASSETCLASS:FIPORTION:FIASSETCLASS","+PERCENT"]),
              ("OPT",["OPTTYPE","STRIKEPRICE","DTEXPIRE","SHPERCTRCT",
                      "?secid","?ASSETCLASS","?FIASSETCLASS"]),
              ("OTHER",["?TYPEDESC","?ASSETCLASS","?FIASSETCLASS"]),
              ("STOCK",["?STOCKTYPE","?YIELD","?DTYIELDASOF","?ASSETCLASS",
                        "?FIASSETCLASS"])]
    for sectype,infoT in infoTs:
        secinfos=g.query("select "+vars(secinfoT+secinfoO+infoT)+" {\n"+
                         "[ ofx:SECLIST [ ?SECTYPE ?x ] ] .\n"+
                         "?x ofx:SECINFO ?y .\n"+
                         pairs("y",secinfoT)+
                         pairs("x",infoT)+
                         pairs("y",secinfoO)+ # crazy bug in librdf 2.4.2
                         "}",
                         initNs=NS,initBindings={"SECTYPE":OFX[sectype+"INFO"]})
        for secinfo in secinfos:
            bNode=BNode()
            f.add((context,flatLine,bNode))
            f.add((bNode,
                   OFX["SECTYPE"],
                   Literal(getSimplifiedName(sectype))))
            bprt(f,bNode,secinfoT+secinfoO+infoT,secinfo)
def flatinv(g,f,acctid,context):
    """Flatten an investment statement (INVSTMTRS) for *acctid* from graph
    *g* into flatLine blank nodes in output graph *f*.

    Sections emitted: the account header, INVPOSLIST positions, INVBAL
    balances, the INVTRANLIST date range, buy/sell transactions, income
    and other transactions, and INVBANKTRAN bank lines.  Every line
    repeats the account fields (invstmtrT).
    """
    invstmtrT=["INVACCTFROM:ACCTID","+BROKERID","CURDEF","DTASOF"]
    invstmtrs=g.query("select "+vars(invstmtrT)+"{"+pairs("x",invstmtrT)+"}",
                      initNs=NS,initBindings={"ACCTID":acctid})
    # exactly one INVSTMTRS per account; `invstmtr` deliberately leaks
    # out of this loop and is re-used by every section below.
    assert len(invstmtrs)==1
    for invstmtr in invstmtrs:
        bNode=BNode()
        f.add((context,flatLine,bNode))
        bprt(f,bNode,invstmtrT,invstmtr)
        f.add((bNode,A3["aCCTTYPE"],Literal("INVESTMENT")))
    #INVPOSLIST
    invposT=["SECID:UNIQUEID","+UNIQUEIDTYPE",
             "HELDINACCT","POSTYPE","UNITS","UNITPRICE","MKTVAL","DTPRICEASOF",
             "?CURRENCY:CURRATE","+CURSYM","?memo"]
    invposs=g.query("select ?pos "+vars(invposT)+"{"+
                    pairs("x",["INVACCTFROM:ACCTID"])+
                    "?x ofx:INVPOSLIST [ ?pos [ ofx:INVPOS ?y ] ] . "+
                    pairs("y",invposT)+"}",
                    initNs=NS,initBindings={"ACCTID":acctid})
    for invpos in invposs:
        bNode=BNode()
        f.add((context,flatLine,bNode))
        bprt(f,bNode,invstmtrT,invstmtr)
        bprt(f,bNode,["pos"]+invposT,invpos)
    #INVBAL
    invbalT=["INVBAL:AVAILCASH","+MARGINBALANCE","+SHORTBALANCE"]
    invbals=g.query("select "+vars(invbalT)+"{"+
                    pairs("x",["INVACCTFROM:ACCTID"])+
                    pairs("x",invbalT)+"}",
                    initNs=NS,initBindings={"ACCTID":acctid})
    for invbal in invbals:
        bNode=BNode()
        f.add((context,flatLine,bNode))
        bprt(f,bNode,invstmtrT,invstmtr)
        bprt(f,bNode,invbalT,invbal)
    #INVTRANLIST
    invtranlistT=["DTSTART","DTEND"]
    invtranlists=g.query("select "+vars(invtranlistT)+"{"+
                         pairs("x",["INVACCTFROM:ACCTID"])+
                         "?x ofx:INVTRANLIST ?y . "+
                         pairs("y",invtranlistT)+"}",
                         initNs=NS,initBindings={"ACCTID":acctid})
    for invtranlist in invtranlists: # At most one INVTRANLIST per account
        bNode=BNode()
        f.add((context,flatLine,bNode))
        bprt(f,bNode,invstmtrT,invstmtr)
        bprt(f,bNode,invtranlistT,invtranlist)
    #INVTRANLIST:INVBUY/SELL
    invtranT=["INVTRAN:FITID","+DTTRADE","SECID:UNIQUEID","+UNIQUEIDTYPE"]
    invtranTopt=[
        "?INVTRAN:SRVRTID","?INVTRAN:DTSETTLE","?INVTRAN:REVERSALFITID",
        "?INVTRAN:mEMO",];
    invbuysellT=invtranT+[
        "units","UNITPRICE",
        "TOTAL",
        "SUBACCTSEC","SUBACCTFUND",
        ]
    invbuysellTopt=invtranTopt+[
        "?COMMISSION","?TAXES","?FEES","?LOAD",
        "?CURRENCY:CURRATE","+CURSYM",
        "?ORIGCURRENCY:currate","+cursym",
        "?INV401KSOURCE","?LOANID",
        "?WITHHOLDING","?TAXEXEMPT","?GAIN",# INVSELL
        "?STATEWITHHOLDING","?PENALTY","?MARKDOWN",
        "?MARKUP", # INVBUY
        "?LOANPRINCIPAL","?LOANINTEREST",
        "?DTPAYROLL","?PRIORYEARCONTRIB",
        ]
    invtranOpTopt=["?ACCRDINT","?BUYTYPE","?RELFITID","?OPTBUYTYPE","?SHPERCTRCT",
                   "?SELLREASON","?SELLTYPE","?AVGCOSTBASIS","?OPTSELLTYPE",
                   "?RELTYPE","?SECURED",]
    invbuysells=g.query("select ?tran ?tranOp "+
                        vars(invbuysellT)+vars(invbuysellTopt)+
                        vars(invtranOpTopt)+"{"+
                        pairs("x",["INVACCTFROM:ACCTID"])+
                        "?x ofx:INVTRANLIST [ ?tran ?z ] ."+
                        "?z ?tranOp ?y ."+
                        pairs("y",invbuysellT)+
                        pairs("y",invbuysellTopt)+
                        pairs("z",invtranOpTopt)+
                        "}",
                        initNs=NS,initBindings={"ACCTID":acctid})
    for invbuysell in invbuysells:
        bNode=BNode()
        f.add((context,flatLine,bNode))
        bprt(f,bNode,invstmtrT,invstmtr)
        bprt(f,bNode,["tran","tranOp"]+invbuysellT+invbuysellTopt+invtranOpTopt,
             invbuysell)
    #INVTRANLIST:INCOME
    incomeT=invtranT
    incomeTopt=invtranTopt+["?OPTACTION","?units","?SHPERCTRCT","?SUBACCTSEC",
                            "?RELFITID","?GAIN","?INCOMETYPE","?TOTAL",
                            "?SUBACCTFUND","?TAXEXEMPT","?WITHHOLDING",
                            "?CURRENCY:CURRATE","+CURSYM",
                            "?ORIGCURRENCY:CURRATE","+CURSYM",
                            "?SUBACCTTO","?SUBACCTFROM",
                            "?UNITPRICE","?COMMISSION","?TAXES","?FEES","?LOAD",
                            "?OLDUNITS","?NEWUNITS","?NUMERATOR","?DENOMINATOR",
                            "?FRACCASH","?TFERACTION","?POSTYPE",
                            "?INVACCTFROM:acctid","+brokerid", # lower case not to conflict with the transaction's ACCTID,BROKERID
                            "?AVGCOSTBASIS","?UNITPRICE","?DTPURCHASE"
                            ]
    incomes=g.query("select ?tran "+
                    vars(incomeT)+vars(incomeTopt)+"{"+
                    pairs("x",["INVACCTFROM:ACCTID"])+
                    "?x ofx:INVTRANLIST [ ?tran ?y ] ."+
                    pairs("y",incomeT)+
                    pairs("y",incomeTopt)+
                    "}",
                    initNs=NS,initBindings={"ACCTID":acctid})
    for income in incomes:
        bNode=BNode()
        f.add((context,flatLine,bNode))
        bprt(f,bNode,invstmtrT,invstmtr)
        bprt(f,bNode,["tran"]+incomeT+incomeTopt,income)
    #INVTRANLIST:INVBANKTRAN
    invbanktranT=["SUBACCTFUND"]
    stmttrnT=["TRNTYPE","DTPOSTED","TRNAMT","FITID",
              "?DTUSER","?DTAVAIL",
              "?CORRECTFITID","?CORRECTACTION","?SRVRTID",
              "?CHECKNUM","?REFNUM","?SIC","?PAYEEID","?NAME",
              "?PAYEE:NAME","+ADDR1","+ADDR2","+ADDR3","+CITY","+STATE","+POSTALCODE","+COUNTRY","+PHONE",
              "?BANKACCTTO:bankid","+acctid","+accttype",
              "?CCACCTTO:acctid",
              "?mEMO","?INV401KSOURCE",
              "?CURRENCY:currate","+cursym",
              "?ORIGCURRENCY:currate","+cursym",
              ]
    invbanktrans=g.query("select "+vars(invbanktranT)+vars(stmttrnT)+"{"+
                         pairs("x",["INVACCTFROM:ACCTID"])+
                         "?x ofx:INVTRANLIST [ ofx:INVBANKTRAN ?y ] ."+
                         "?y ofx:STMTTRN ?z ."+
                         pairs("y",invbanktranT)+pairs("z",stmttrnT)+
                         "}",
                         initNs=NS,initBindings={"ACCTID":acctid})
    for invbanktran in invbanktrans:
        bNode=BNode()
        f.add((context,flatLine,bNode))
        bprt(f,bNode,invstmtrT,invstmtr)
        bprt(f,bNode,invbanktranT+stmttrnT,invbanktran)
def flatbank(g,f,acctid,context,cc=False):
    """Flatten a bank (or, with cc=True, credit card) statement for
    *acctid* from graph *g* into flatLine blank nodes in graph *f*.

    Sections emitted: account header, ledger/available balances, the
    BANKTRANLIST date range, and every STMTTRN transaction.  As in
    flatinv, `invstmtr` from the first loop is re-used by later sections.
    """
    if cc:
        aCCTTYPE="CC"
        invstmtrT=["CCACCTFROM:ACCTID","CURDEF","LEDGERBAL:DTASOF"]
    else:
        aCCTTYPE="BANK"
        invstmtrT=["BANKACCTFROM:ACCTID","+BANKID","+ACCTTYPE","CURDEF",
                   "LEDGERBAL:DTASOF"]
    invstmtrs=g.query("select "+vars(invstmtrT)+"{"+pairs("x",invstmtrT)+"}",
                      initNs=NS,initBindings={"ACCTID":acctid})
    assert len(invstmtrs)==1
    for invstmtr in invstmtrs:
        bNode=BNode()
        f.add((context,flatLine,bNode))
        bprt(f,bNode,invstmtrT,invstmtr)
        f.add((bNode,A3["aCCTTYPE"],Literal(aCCTTYPE)))
    #BAL
    balT=["LEDGERBAL:BALAMT","+dtASOF","?AVAILBAL:bALAMT","+dTASOF"]
    bals=g.query("select "+vars(balT)+"{"+
                 pairs("x",[aCCTTYPE+"ACCTFROM:ACCTID"])+
                 pairs("x",balT)+"}",
                 initNs=NS,initBindings={"ACCTID":acctid})
    for bal in bals:
        bNode=BNode()
        f.add((context,flatLine,bNode))
        bprt(f,bNode,invstmtrT,invstmtr)
        bprt(f,bNode,balT,bal)
    #BANKTRANLIST
    invtranlistT=["DTSTART","DTEND"]
    invtranlists=g.query("select "+vars(invtranlistT)+"{"+
                         pairs("x",[aCCTTYPE+"ACCTFROM:ACCTID"])+
                         "?x ofx:BANKTRANLIST ?y . "+
                         pairs("y",invtranlistT)+"}",
                         initNs=NS,initBindings={"ACCTID":acctid})
    for invtranlist in invtranlists: # At most one INVTRANLIST per account
        bNode=BNode()
        f.add((context,flatLine,bNode))
        bprt(f,bNode,invstmtrT,invstmtr)
        bprt(f,bNode,invtranlistT,invtranlist)
    #STMTTRN
    stmttrnT=["TRNTYPE","DTPOSTED","TRNAMT","FITID",
              "?DTUSER","?DTAVAIL",
              "?CORRECTFITID","?CORRECTACTION","?SRVRTID",
              "?CHECKNUM","?REFNUM","?SIC","?PAYEEID","?NAME",
              "?PAYEE:NAME","+ADDR1","+ADDR2","+ADDR3","+CITY","+STATE",
              "+POSTALCODE","+COUNTRY","+PHONE",
              "?BANKACCTTO:bankid","+acctid","+accttype",
              "?CCACCTTO:acctid",
              "?mEMO","?INV401KSOURCE",
              "?CURRENCY:currate","+cursym",
              "?ORIGCURRENCY:currate","+cursym",
              ]
    invbanktrans=g.query("select "+vars(stmttrnT)+"{"+
                         pairs("x",[aCCTTYPE+"ACCTFROM:ACCTID"])+
                         "?x ofx:BANKTRANLIST ?y ."+
                         "?y ofx:STMTTRN ?z ."+
                         pairs("z",stmttrnT)+
                         "}",
                         initNs=NS,initBindings={"ACCTID":acctid})
    for invbanktran in invbanktrans:
        bNode=BNode()
        f.add((context,flatLine,bNode))
        bprt(f,bNode,invstmtrT,invstmtr)
        bprt(f,bNode,stmttrnT,invbanktran)
def flat(fin,fout,context=None,hashing=False):
    """Flatten a structured-N3 OFX graph (produced by pre()) into flat
    a3:flatLine N3 written to *fout*.

    Securities are flattened first, then each account found in the graph
    is dispatched by its *ACCTFROM type to flatbank or flatinv.
    *hashing* is accepted for interface uniformity but unused here.
    """
    g = Graph()
    g.parse(fin,format='n3',publicID=context)
    f = Graph() #output graph
    f.bind("ofx",OFX)
    f.bind("xsd",XSD)
    f.bind("a3",A3)
    context=FS[context]
    flatseclist(g,f,context)
    accounts=g.query("select ?typ "+vars(["ACCTID"])+"{"+
                     "[ ?typ ?x ] . "+
                     pairs("x",["ACCTID"])+"}",initNs=NS)
    for account in accounts:
        acctid=account[1]
        typ=account[0]
        if typ==OFX["BANKACCTFROM"]:
            logging.info("Bank statement")
            flatbank(g,f,acctid,context)
        elif typ==OFX["CCACCTFROM"]:
            logging.info("Credit card statement")
            flatbank(g,f,acctid,context,cc=True)
        elif typ==OFX["INVACCTFROM"]:
            logging.info("Investment statement")
            flatinv(g,f,acctid,context)
        else:
            logging.info("Unrecognized type %s"%account[1])
    fout.write(f.serialize(format='n3'))
| Python |
########################################################################
# 3account - personal finance data-base
# Copyright (C) 2007,8,9 Ehud Ben-Reuven
# udi@benreuven.com
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation version 2.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,USA.
########################################################################
"""Convert Direct-Net checking transactions CSV file to flat-OFX-RDF and
"""
from csv2flt import *
import re
from xls2csv2 import xls2csv2
from n3tagvalue import *
import logging
def acctidre(name):
    """Return a regex fragment that captures an account id (digits and
    dashes) into a named group called *name*."""
    group_pattern = '(?P<%s>[\d\-]+)'
    return group_pattern % name
# Header/Transactions/Footer format descriptors consumed by
# readcsvtable_multiformat(): each tuple is (header_rows, line_regexps,
# footer_rows) describing one historical layout of the Direct Net CSV
# export.  Every cell is a regular expression; named groups (ACCTID,
# DTASOF, ...) are extracted into per-row dictionaries by the reader.
htf_list=[
    # version 0
    ([
        ['Exported from Direct Net on ',eurdtzre('DTASOF'),'','','',''],
        ['Booking entries','','','','',''],
        ['Account number',acctidre('ACCTID'),'','','',''],
        ['Balance',currencyre('CURRENCY')+' '+commanumre('UNITS'),'','','',''],
        ['Holder',strre('OWNER'),'','','',''],
        ['Bank',strre('ORG'),'','','',''],
        ['Description',strre('ACCTMEMO'),'','','',''],
        ['Current date',eurdtzre('DTASOF'),'','','',''],
        ['Booking entries from '+eurdre('DTSTART')+' - '+eurdre('DTEND')+
         ' \(including provisional bookings\)','','','','',''],
        ['Booking date','Text','Debit','Credit','Value date','Balance'],
     ],
     [eurdre('DTSETTLE'),
      strre('MEMO'),
      pvaluere('UNITS_debit')+'|', # The | is to allow also blank cells
      pvaluere('UNITS_credit')+'|',
      eurdre('DTAVAIL'),
      valuere('TOTAL')+'|'],
     [['Total of column','',valuere('UNITS_total_debit'),
       valuere('UNITS_total_credit'),'','']]
    ),
    # version 1 introduced somewhere 070501 and 070626
    ([
        ['Exported from Direct Net on',eurdtzre('DTASOF'),'','','',''],
        ['Booking Entries','','','','',''],
        ['Account Number',acctidre('ACCTID'),'','','',''],
        ['Balance',currencyre('CURRENCY')+' '+commanumre('UNITS'),'','','',''],
        ['Holder',strre('OWNER'),'','','',''],
        ['Bank',strre('ORG'),'','','',''],
        ['Description',strre('ACCTMEMO'),'','','',''],
        ['Current Date ',eurdtzre('DTASOF'),'','','',''],
        ['Booking Entries from '+eurdre('DTSTART')+' - '+eurdre('DTEND')+
         ' \(Including Provisional Bookings\)','','','','',''],
        ['Booking Date','Text','Debit','Credit','Value Date','Balance'],
     ],
     [eurdre('DTSETTLE'),
      strre('MEMO'),
      pvaluere('UNITS_debit')+'|', # The | is to allow also blank cells
      pvaluere('UNITS_credit')+'|',
      eurdre('DTAVAIL'),
      valuere('TOTAL')+'|'],
     [['Total of Column','',valuere('UNITS_total_debit'),
       valuere('UNITS_total_credit'),'','']]
    ),
    # version 2 introduced before 070910
    ([
        ['Exported from Direct Net on',eurdtzre('DTASOF'),'','','',''],
        ['Booking Entries','','','','',''],
        ['Account Number',acctidre('ACCTID'),'','','',''],
        ['Balance',currencyre('CURRENCY')+' '+commanumreint('UNITS'),'','','',''],
        ['IBAN',strre('ACCTID'),'','','',''],
        ['Bank',strre('ORG'),'','','',''],
        ['Holder',strre('OWNER'),'','','',''],
        ['Description',strre('ACCTMEMO'),'','','',''],
        ['Booking Entries from '+eurdre('DTSTART')+' - '+eurdre('DTEND')+
         ' \(Including Provisional Bookings\)','','','','',''],
        ['Booking Date','Text','Debit','Credit','Value Date','Balance'],
     ],
     [eurdre('DTSETTLE'),
      strre('MEMO'),
      pvaluereint('UNITS_debit')+'|', # The | is to allow also blank cells
      pvaluereint('UNITS_credit')+'|',
      eurdre('DTAVAIL'),
      valuereint('TOTAL')+'|'],
     [['Total of Column','',valuereint('UNITS_total_debit'),
       valuereint('UNITS_total_credit'),'','']]
    ),
    # version 3
    ([
        ['Exported from Direct Net on '+eurdtzre('DTASOF'),'','','','','',''],
        ['Bookings','','','','','',''],
        ['Account Details ','','','','','',''],
        ['Account',strre('ACCTID')+' Current account, '+strre('OWNER'),'','Balance',
         currencyre('CURRENCY')+' '+valuere('UNITS')],
        ['IBAN',strre('ACCTID'),'',strre('ACCTMEMO'),strre('ACCTMEMO1')],
        ['Booking Entries from '+eurdre('DTSTART')+' - '+eurdre('DTEND')+
         ' \(Including Provisional Bookings\)','','',''],
        ['Booking Date','Text','Debit','Credit','Value Date','Balance'],
     ],
     [eurdre('DTSETTLE'),
      strre('MEMO'),
      pvaluereint('UNITS_debit')+'|', # The | is to allow also blank cells
      pvaluereint('UNITS_credit')+'|',
      eurdre('DTAVAIL'),
      valuereint('TOTAL')+'|'],
     [['Total of Column','',valuereint('UNITS_total_debit'),
       valuereint('UNITS_total_credit'),'','']]
    ),
]
def flat(csvfile,fout,context=None,hashing=False):
    """Flatten a Direct Net checking-account CSV export into flat-OFX-RDF.

    csvfile -- path of the CSV export to parse
    fout    -- writable file object receiving N3 "a3:flatLine" blocks
    context -- context string passed through to n3header()
    hashing -- if True, privacy-sensitive values (ACCTID, memos, names)
               are hashed by stagvalue()
    """
    n3header(fout,"$Id$"[1:-1],context)
    # h: header rows, t: transaction rows, f: footer rows,
    # version: index of the layout that matched in htf_list.
    h,t,f,version=readcsvtable_multiformat(csvfile,htf_list)
    line_num=len(htf_list[version][0])
    hl=0  # cursor into the header rows; row order differs per version
    if version<3:
        l=h[hl]
        hl+=1
        dtasof = l['DTASOF_year']+l['DTASOF_month']+l['DTASOF_day']+\
            l['DTASOF_hours']+l['DTASOF_min']+l['DTASOF_TZ']
        hl+=1
        acctid=h[hl]['ACCTID']
        hl+=1
        currency=h[hl]['CURRENCY']
        units=h[hl]['UNITS'].replace(',','')
        hl+=1
        if version>=2:
            # version >= 2 adds an IBAN header row
            acctid1='IBEN='+h[hl]['ACCTID']
            hl+=1
        if version<2:
            owner=h[hl]['OWNER']
            hl+=1
            org=h[hl]['ORG']
            hl+=1
        else:
            # versions >= 2 list the bank before the holder
            org=h[hl]['ORG']
            hl+=1
            owner=h[hl]['OWNER']
            hl+=1
        acctmemo=h[hl]['ACCTMEMO']
        hl+=1
        if version<2:
            # version < 2 repeats the as-of date in a "Current date" row
            l=h[hl]
            hl+=1
            dtasof1 = l['DTASOF_year']+l['DTASOF_month']+l['DTASOF_day']+\
                l['DTASOF_hours']+l['DTASOF_min']+l['DTASOF_TZ']
            if dtasof != dtasof1:
                print 'Warning Export date and Current date dont match'
        l=h[hl]
        hl+=1
        dtstart = l['DTSTART_year']+l['DTSTART_month']+l['DTSTART_day']
        dtend = l['DTEND_year']+l['DTEND_month']+l['DTEND_day']
    else:
        # version 3 uses fixed header-row positions instead of a cursor
        l=h[0]
        dtasof = l['DTASOF_year']+l['DTASOF_month']+l['DTASOF_day']+\
            l['DTASOF_hours']+l['DTASOF_min']+l['DTASOF_TZ']
        l=h[3]
        acctid=l['ACCTID']
        owner=l['OWNER']
        currency=l['CURRENCY']
        units=l['UNITS'].replace(',','')
        l=h[4]
        acctid1=l['ACCTID']
        acctmemo=l['ACCTMEMO']
        l=h[5]
        dtstart = l['DTSTART_year']+l['DTSTART_month']+l['DTSTART_day']
        dtend = l['DTEND_year']+l['DTEND_month']+l['DTEND_day']
    curdef=currency
    accttype='CHECKING'
    secid=currency
    # Account-description line
    print >>fout,"a3:flatLine [ ",
    print >>fout,stagvalue('ACCTID',acctid,hashing=hashing),
    print >>fout,stagvalue('CURDEF',curdef),
    print >>fout,stagvalue('ACCTTYPE',accttype),
    print >>fout,"];"
    # Balance line
    print >>fout,"a3:flatLine [ ",
    print >>fout,stagvalue('ACCTID',acctid,hashing=hashing)
    print >>fout,stagvalue('DTASOF',dtasof),stagvalue('UNITS',units),
    print >>fout,"];"
    total_debit=0
    total_credit=0
    previouse_dtsettle=None
    for l in t:
        line_num+=1
        print >>fout,"a3:flatLine [ ",
        dtposted= l['DTSETTLE_year']+l['DTSETTLE_month']+l['DTSETTLE_day']
        assert len(dtposted)==8
        # There can be multiple transactions on the same day
        # (each will have a different NLINE so it will be taken to be a
        # different transaction) The newest appear first.
        assert previouse_dtsettle==None or previouse_dtsettle>=dtposted
        previouse_dtsettle=dtposted
        # TODO: what is dtvalue (it is not DTSETTLE or DTTRADE, I checked
        # information on individual transactions.)
        dtavail= l['DTAVAIL_year']+l['DTAVAIL_month']+l['DTAVAIL_day']
        assert len(dtavail)==8
        memo=l['MEMO']
        debit=l['UNITS_debit']
        credit=l['UNITS_credit']
        # A row carries either a debit or a credit, never both
        if debit and credit:
            logging.error("Debit and Credit in same line %d"%line_num)
            raise Exception()
        if debit:
            debit=float(debit)
            total_debit+=debit
            trnamt=-debit
            trntype='DEBIT'
        elif credit:
            credit=float(credit)
            total_credit+=credit
            trnamt=credit
            trntype='CREDIT'
        else:
            logging.error("no credit or debit %d"%line_num)
            raise Exception()
        # The identifying fields are joined and hashed into a stable FITID
        strn=[]
        strn.append(stagvalue('ACCTID',acctid,hashing=hashing))
        strn.append(stagvalue('DTPOSTED',dtposted))
        strn.append(stagvalue('TRNTYPE',trntype))
        # NOTE(review): this guards on `units` (the account-balance string
        # from the header), not on `trnamt` -- looks suspicious; confirm.
        if units: strn.append(stagvalue('TRNAMT',trnamt))
        strn=" ".join(strn)
        print >>fout,stagvalue('FITID',hash(strn)),
        print >>fout,strn,
        print >>fout,stagvalue('DTASOF',dtasof),
        print >>fout,stagvalue('DTAVAIL',dtavail),
        # The memo field is fixed-width: first 45 chars are the transaction
        # kind, the rest is free text (security name, amounts, ...)
        memo1=memo[:45].strip()
        memo2=memo[45:].strip()
        print >>fout,stagvalue('mEMO',memo1,hashing=hashing),
        if memo2:
            print >>fout,stagvalue('mEMO',memo2,hashing=hashing),
        # using match force the number to appear at the start
        t_units=re.match( ',?(?P<U>\d{1,3}(,\d{3})*) [A-Z0-9]',
            memo2)
        if t_units:
            t_units=t_units.group('U')
            secname=memo2[len(t_units)+1:].strip()
            t_units=float(t_units.replace(',',''))
            if not secname:
                logging.error('missing SECNAME %d'%line_num)
                raise Exception()
        else:
            secname=None
        # Map the memo1 transaction kind to a flat-OFX transaction
        if memo1 in ['Securities purchase', 'Deposit', 'Reversal Redemption']:
            print >>fout,stagvalue('SECNAME',secname),
            print >>fout,stagvalue('NAME',secname,hashing=hashing),
            print >>fout,stagvalue('tran','BUY'),
            if t_units:
                print >>fout,stagvalue('units',t_units),
            else:
                logging.warning('%s','Units missing')
        elif memo1 in ['Reverse split', 'split','Stock dividend/spin-off']:
            print >>fout,stagvalue('SECNAME',secname),
            print >>fout,stagvalue('NAME',secname,hashing=hashing),
            print >>fout,stagvalue('tran','SPLIT'),
            if t_units:
                print >>fout,stagvalue('units',t_units),
            else:
                logging.warning('%s','Units missing')
        elif memo1 in ['Securities sale',
                'Redemption','Redemption of fund units',
                'Capital repayment','Custody account withdrawal']:
            print >>fout,stagvalue('SECNAME',secname),
            print >>fout,stagvalue('NAME',secname,hashing=hashing),
            print >>fout,stagvalue('tran','SELL'),
            if t_units:
                print >>fout,stagvalue('units',-t_units),
            else:
                logging.info('Missing units')
        elif memo1=='Cash dividend':
            if not t_units:
                logging.error('Missing units %s %d'%(memo1,line_num))
                raise Exception()
            print >>fout,stagvalue('SECNAME',secname),
            print >>fout,stagvalue('NAME',secname,hashing=hashing),
            print >>fout,stagvalue('tran','DIV'),
            print >>fout,stagvalue('units',t_units),
        elif memo1=='Capital gain':
            if not t_units:
                logging.error('Missing units %s %d'%(memo1,line_num))
                raise Exception()
            print >>fout,stagvalue('SECNAME',secname),
            print >>fout,stagvalue('NAME',secname,hashing=hashing),
            print >>fout,stagvalue('tran','CGAIN'),
            print >>fout,stagvalue('units',t_units),
        elif memo1=='Interest payment' or memo1=='Equalisation payment':
            if not t_units:
                logging.error('Missing units %s %d'%(memo1,line_num))
                raise Exception()
            print >>fout,stagvalue('SECNAME',secname),
            print >>fout,stagvalue('NAME',secname,hashing=hashing),
            print >>fout,stagvalue('tran','INT'),
            print >>fout,stagvalue('units',t_units),
        elif memo1=='Fid. call deposit int. settlemt.':
            print >>fout,stagvalue('tran','INT'),
            print >>fout,stagvalue('NAME',memo2,hashing=hashing), # XACCTID
        elif memo1 in ['Fiduciary call deposit',
                'Fiduciary call deposit - increase']:
            print >>fout,stagvalue('tran','CREDIT'),
            print >>fout,stagvalue('NAME',memo2,hashing=hashing), #XACCTID
        elif memo1 in ['Fiduciary call dep. - liquidation']:
            print >>fout,stagvalue('tran','DEBIT'),
            print >>fout,stagvalue('NAME',memo2,hashing=hashing), #XACCTID
        elif memo1=='Payment order' and memo2.startswith('MANAGEMENT FEE'):
            pass
        elif memo1 in ['Portfolio management fee',
                'Balance settlement of expenses',
                'Reversal Portfolio management fee',
                'Administration fees','Reversal Administration fees',
                'Charges for tax documentation' ,
                'Balance of closing entries','Lending commission',
                'Reversal Charges for tax documentati']:
            # fee-like entries carry no extra tags
            pass
        elif memo1 in ['Transfer', 'Payment order','Payment']:
            # memo2 looks like "IN <acctid> AT <rate>" or "EX <acctid> AT <rate>"
            o=re.match(',?(?P<DIR>IN|EX) '+
                acctidre('ACCTID')+
                ' AT (?P<XRATE>\d+\.\d+)',memo2)
            if o:
                if o.group('DIR')=='IN' and units>0:
                    logging.info('Wrong direction %d'%line_num)
                print >>fout,stagvalue('NAME',o.group('ACCTID'), #XACCTID
                    hashing=hashing),
                print >>fout,stagvalue('xrate',o.group('XRATE')),
            else:
                logging.warning('%s','missing direction and account')
        else:
            print 'Unknown transaction',memo1
        print >>fout,"];"
    print >>fout,"."
    # Cross-check the accumulated debits/credits against the footer totals
    if (abs(float(f[0]['UNITS_total_credit'])-total_credit)>1 or
        abs(float(f[0]['UNITS_total_debit'])-total_debit)>1):
        print 'Warning total dont match',f[0]['UNITS_total_credit'],
        print total_credit,f[0]['UNITS_total_debit'],total_debit
| Python |
########################################################################
# 3account - personal finance data-base
# Copyright (C) 2007,8,9 Ehud Ben-Reuven
# udi@benreuven.com
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation version 2.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
########################################################################
"""Convert Citi N.A. (Citibank North America)
Portfolio (investment) Account Activity CSV file to flat-OFX-RDF and
load into Sesame server.
"""
from csv2flt import *
import re
import os
import time
from n3tagvalue import *
# Expected CSV column headers of the Citi portfolio-activity export.
header = [
    ['Account Number','Memo','Price','Quantity','Activity Type','Check Number',
     'Total','Trade Date','Cusip']
]
# One regular expression per CSV column; named groups are extracted into
# per-row dictionaries by readcsvtable().
line=['(?P<ACCTID>[\d\-A-Z]+)',
    strre('MEMO'),
    floatre('UNITPRICE'),
    floatre('UNITS'),
    strre('TRNTYPE'),
    "(99999)?",
    floatre('TOTAL'),
    '(?P<month>\d{2})/(?P<day>\d{2})/(?P<year>\d{4})',
    '#(((?P<SECID>[A-Z\d]{9})000)|\s*)',
    ]
footer=[ # This footer was added between 070501 and 070601
    [],
    ["<!-- -->"],
    ]
def flat(csvfile,fout,context=None,hashing=False):
    """Flatten a Citi N.A. portfolio-activity CSV export into flat-OFX-RDF.

    csvfile -- path of the CSV file; its modification time supplies DTASOF
    fout    -- writable file object receiving N3 "a3:flatLine" blocks
    context -- context string passed through to n3header()
    hashing -- if True, privacy-sensitive values are hashed by stagvalue()
    """
    # The export carries no as-of date, so use the file's mtime (UTC).
    dtasof=time.strftime("%Y%m%d%H%M%S",time.gmtime(os.stat(csvfile).st_mtime))
    logging.info("DTASOF=%s"%dtasof)
    n3header(fout,"$Id$"[1:-1],context)
    h,t,f = readcsvtable(csvfile,header,line,footer,optional_footer=True)
    accttype='INVESTMENT'
    currency='USD'
    table_total=0
    accounts=[]
    for l in t:
        print >>fout,"a3:flatLine [ ",
        acctid=l['ACCTID']
        memo=l['MEMO']
        if l['SECID']:
            secid=l['SECID']
        else:
            # rows without a CUSIP are cash movements
            secid='CASH='+currency
        dttrade = l['year']+l['month']+l['day']
        # Normalize empty or zero numeric cells to None
        units=float(l['UNITS']) if l['UNITS'] else None
        if units==0: units=None
        unitprice=float(l['UNITPRICE']) if l['UNITPRICE'] else None
        if unitprice==0: unitprice=None
        total=float(l['TOTAL']) if l['TOTAL'] else None
        if total==0: total=None
        # TRNTYPE: map Citi activity types to flat-OFX transaction types,
        # sanity-checking the numeric fields for each kind.
        relsecid=None
        assetclass=None
        trntype=l['TRNTYPE']
        if secid.startswith('CASH='):
            if trntype=='DEPOSIT':
                assert unitprice==None
                assert units==None
                assert total>0
                trntype='CREDIT'
            elif trntype=='CASH JRNL':
                assert unitprice==None
                assert units==None
                assert total<0
                trntype='DEBIT'
            elif trntype=='FEE':
                assert unitprice==None
                assert units==None
                assert total<0
                trntype='DEBIT'
            else:
                logging.info('Unknown TRNTYPE %s'%trntype)
        else:
            if trntype=='BOUGHT':
                assert unitprice>0
                assert units>0
                assert units*unitprice<=-total+0.02 # rounding error
                trntype='BUY'
            elif trntype=='SOLD':
                assert unitprice>0
                assert units<0
                assert -units*unitprice>=total-0.03 # Rounding error
                trntype='SELL'
            elif trntype=='INTEREST':
                assert unitprice==None or unitprice==1 # interest from deposit
                assert units==None
                assert total>0
                trntype='INT'
            elif trntype=='DIVIDEND':
                assert unitprice==None
                assert units==None
                assert total
                if total<0:
                    logging.info('%s','Negative dividend %f'%total)
                trntype='DIV'
            elif trntype=='CAPITAL GAIN':
                assert unitprice==None
                assert units==None
                assert total
                if total<0:
                    logging.info('%s','Negative dividend %f'%total)
                # L/T in the memo marks a long-term gain
                if memo.find('L/T')!=-1:
                    trntype='CGLONG'
                else:
                    trntype='CGSHORT'
            elif trntype=='REINVEST':
                if unitprice==None and units==None:
                    # cash leg of a reinvestment
                    assert total<0
                    relsecid=secid
                    #TODO trntype='DEBIT'
                    #TODO secid='CASH='+currency
                else:
                    assert unitprice>0
                    assert units>0
                    assert not total # is 0 because it is always followed with a REINVEST transaction in current account
                    trntype='BUY'
            elif trntype=='REVERSAL':
                pass
            elif trntype=='CONVERSION':
                assert total==None
                assert unitprice==None
                trntype='TRANSFER'
            elif trntype=='WITHDRAWAL':
                if units:
                    assert unitprice==1
                    assert units<0
                    assert -units*unitprice>=total-0.01 # Rounding error
                trntype='DEBIT'
                assetclass='DEPOSIT'
            elif trntype=='DEPOSIT':
                if units:
                    assert unitprice==1
                    assert units>0
                    assert units*unitprice<=-total+0.02 # rounding error
                trntype='CREDIT'
                assetclass='DEPOSIT'
            else:
                raise Exception('Unknown TRNTYPE')
        # FITID is a stable hash of the identifying fields
        strn=stagvalue('ACCTID',acctid,hashing=hashing)
        strn+=n3secid('CUSIP',secid)
        strn+=stagvalue('DTTRADE',dttrade)
        strn+=stagvalue('tran',trntype)
        strn+=stagvalue('units',units)
        print >>fout,stagvalue('FITID',hash(strn)),
        print >>fout,strn,
        print >>fout,stagvalue('DTASOF',dtasof),stagvalue('ACCTTYPE',accttype),
        print >>fout,stagvalue('DTSETTLE',dttrade),
        print >>fout,stagvalue('UNITPRICE',unitprice),
        print >>fout,stagvalue('TOTAL',total),
        print >>fout,stagvalue('CURRENCY',currency),
        print >>fout,stagvalue('ASSETCLASS',assetclass),
        #TODO print >>fout,stagvalue('RELSECID',relsecid),
        print >>fout,stagvalue('mEMO',memo,hashing=hashing)
        # Every transaction generates a matching transaction in the current
        # account.  The only exception is a REINVEST transaction that for
        # some reason has a TOTAL=0 in the CSV file but on the other hand
        # has a separate DEBIT transaction in the same CSV file.
        #if total and not secid.startswith('CASH='):
            #relsecid=secid
            #trntype='CREDIT' if total>0 else 'DEBIT'
            #secid='CASH='+currency
            #print >>fout,stagvalue('ACCTID',acctid,hashing=hashing),
            #print >>fout,stagvalue('ACCTTYPE','INVESTMENT'),
            #print >>fout,n3secid('CUSIP',secid),
            #print >>fout,stagvalue('DTSETTLE',dttrade),
            #print >>fout,stagvalue('DTTRADE',dttrade),
            #print >>fout,stagvalue('TRNTYPE',trntype),
            #print >>fout,stagvalue('UNITS',total),
            #print >>fout,stagvalue('UNITPRICE',1.),
            #print >>fout,stagvalue('TOTAL',total),
            #print >>fout,stagvalue('CURRENCY',currency),
            #print >>fout,stagvalue('RELSECID',relsecid),
        print >>fout,"];"
    print >>fout,"."
| Python |
########################################################################
# 3account - personal finance data-base
# Copyright (C) 2007,8,9 Ehud Ben-Reuven
# udi@benreuven.com
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation version 2.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
########################################################################
"""Convert BLL investment activity XLS file to flat-OFX-RDF and
load into Sesame server.
"""
from csv2flt import *
import re
from xls2csv2 import xls2csv2
from n3tagvalue import *
import logging
# Optional dict of extra command-line parameters (e.g. ACCTID) injected by
# the generic driver before flat() is called; None when not provided.
fltparams=None
def acctidre(name):
    """Return a regex fragment that captures an account id (digits,
    dashes and slashes) into a named group called *name*."""
    group_pattern = '(?P<%s>[\d\-\/]+)'
    return group_pattern % name
def secid(name):
    """Return a regex fragment that captures a 6-7 digit security number
    into a named group called *name*."""
    group_pattern = '(?P<%s>\d{6,7})'
    return group_pattern % name
# Header/Transactions/Footer format descriptors consumed by
# readcsvtable_multiformat(): each tuple is (header_rows, line_regexps,
# footer_rows) for one historical layout of the BLL XLS-derived CSV.
# Header labels are Hebrew text in the 'hebrew' codepage (hex escapes);
# every cell is a regular expression with named capture groups.
htf_list=[
    #version 0
    ([
        ['', '', '', '', '', '', '', '', '', '', ''],
        ['\xfa\xf0\xe5\xf2\xe5\xfa \xe4\xf1\xe8\xe5\xf8\xe9\xe5\xfa', '', '', '',
         '', '', '', '', '', '', ''], # In Hebrew: Historical transactions
        ['\xfa\xe0\xf8\xe9\xea: '+eurdre('DTASOF','/'), '', '', '', '', '', '', '',
         '', '', ''],
        ['', '', '', '', '', '', '', '', '', '', ''],
        ['', '', '', '', '', '', '', '', '', '', ''],
        ['\xee\xf1\xf4\xf8 \xf0\xe9\xe9\xf8', # Asset #
         '\xf9\xed \xe4\xf0\xe9\xf8', # Asset Name
         '\xf4\xf2\xe5\xec\xe4', # operation
         '\xfa\xe0\xf8\xe9\xea \xfa\xf9\xec\xe5\xed', # date of payment
         '\xeb\xee\xe5\xfa \xee\xf7\xe5\xf8\xe9\xfa', #original amount
         '\xeb\xee\xe5\xfa \xe1\xe9\xf6\xe5\xf2', # actual amount
         '\xf9\xf2\xf8 \xe1\xe9\xf6\xe5\xf2', # rate
         '\xfa\xee\xe5\xf8\xe4', # received
         '\xf2\xee\xec\xe5\xfa', # expense
         '\xee\xe8\xe1\xf2', # currency
         '\xf1\xe8\xe8\xe5\xf1'] # status
     ],
     [secid('SECID')+'.0',
      strre('SECNAME'),
      strre('TRNTYPE'),
      eurdre('DTSETTLE','/',year_len=2)+'( 00:00:00)?',
      floatre('ORIGUNITS'),
      floatre('UNITS'),
      floatre('UNITPRICE'),
      floatre('net_total'),
      floatre('FEES'),
      strre('CURRENCY'),
      strre('STATUS')],
     None),
    #version1
    ([
        ['', '', '', '', '', '', '', '', '', ''],
        ['\xfa\xf0\xe5\xf2\xe5\xfa \xe4\xf1\xe8\xe5\xf8\xe9\xe5\xfa', '', '', '',
         '', '', '', '', '', ''], # In Hebrew: Historical transactions
        ['('+'\xfa\xe0\xf8\xe9\xea: '+eurdre('DTASOF','/')+')?', '', '', '', '', '',
         '', '', '('+'\xfa\xe0\xf8\xe9\xea: '+eurdre('DTASOF','/')+')?', ''],
        ['', '', '', '', '', '', '', '', '', ''],
        ['', '', '', '', '', '', '', '', '', ''],
        ['\xee\xf1\xf4\xf8 \xf0\xe9\xe9\xf8', # Asset #
         '\xf9\xed \xe4\xf0\xe9\xf8', # Asset Name
         '\xf4\xf2\xe5\xec\xe4', # operation
         '\xfa\xe0\xf8\xe9\xea \xf2\xf8\xea', # value date
         '\xeb\xee\xe5\xfa \xee\xf7\xe5\xf8\xe9\xfa', #original amount
         '\xeb\xee\xe5\xfa \xe1\xe9\xf6\xe5\xf2', # actual amount
         '\xf9\xf2\xf8 \xe1\xe9\xf6\xe5\xf2', # rate
         '\xfa\xee\xe5\xf8\xe4 \xf0\xe8\xe5', # received net
         '\xf2\xee\xec\xe5\xfa', # expense
         '\xf1\xe8\xe8\xe5\xf1'] # status
     ],
     [secid('SECID')+'.0',
      strre('SECNAME'),
      strre('TRNTYPE'),
      eurdre('DTSETTLE','/',year_len=2)+'( 00:00:00)?',
      floatre('ORIGUNITS'),
      floatre('UNITS'),
      floatre('UNITPRICE'),
      floatre('net_total'),
      floatre('FEES'),
      strre('STATUS')],
     None),
    #version2
    ([
        ['', '', '', '', '', '', '', '', '', '', ''],
        ['\xfa\xf0\xe5\xf2\xe5\xfa \xe4\xf1\xe8\xe5\xf8\xe9\xe5\xfa', '', '', '',
         '', '', '', '', '', '', ''], # In Hebrew: Historical transactions
        ['('+'\xfa\xe0\xf8\xe9\xea: '+eurdre('DTASOF','/')+')?', '', '', '', '', '',
         '', '', '', '', ''],
        ['', '', '', '', '', '', '', '', '', '', ''],
        ['', '', '', '', '', '', '', '', '', '', ''],
        ['\xee\xf1\xf4\xf8 \xf0\xe9\xe9\xf8', # Asset #
         '\xf9\xed \xe4\xf0\xe9\xf8', # Asset Name
         '\xf4\xf2\xe5\xec\xe4', # operation
         '\xfa\xe0\xf8\xe9\xea \xf2\xf8\xea', # value date
         '\xeb\xee\xe5\xfa \xee\xf7\xe5\xf8\xe9\xfa', #original amount
         '\xeb\xee\xe5\xfa \xe1\xe9\xf6\xe5\xf2', # actual amount
         '\xf9\xf2\xf8 \xe1\xe9\xf6\xe5\xf2', # rate
         '\xfa\xee\xe5\xf8\xe4 \xf0\xe8\xe5', # received net
         '\xf2\xee\xec\xe5\xfa', # expense
         '\xee\xe8\xe1\xf2', # currency
         '\xf1\xe8\xe8\xe5\xf1'] # status
     ],
     [secid('SECID')+'.0',
      strre('SECNAME'),
      strre('TRNTYPE'),
      eurdre('DTSETTLE','/',year_len=2)+'( 00:00:00)?',
      floatre('ORIGUNITS'),
      floatre('UNITS'),
      floatre('UNITPRICE'),
      floatre('net_total'),
      floatre('FEES'),
      strre('CURRENCY'),
      strre('STATUS')],
     None),
    #version 3
    ([
        ['', '', '', '', '', '', '', '', ''],
        ['\xfa\xf0\xe5\xf2\xe5\xfa \xe4\xf1\xe8\xe5\xf8\xe9\xe5\xfa', '', '', '',
         '', '', '', '', ''], # In Hebrew: Historical transactions
        ['('+'\xfa\xe0\xf8\xe9\xea: '+eurdre('DTASOF','/')+')?', '', '', '', '', '',
         '', '', ''],
        ['', '', '', '', '', '', '', '', ''],
        ['', '', '', '', '', '', '', '', ''],
        ['\xee\xf1\xf4\xf8 \xf0\xe9\xe9\xf8', # Asset #
         '\xf9\xed \xe4\xf0\xe9\xf8', # Asset Name
         '\xf4\xf2\xe5\xec\xe4', # operation
         '\xfa\xe0\xf8\xe9\xea \xfa\xf9\xec\xe5\xed', # payment date
         '\xeb\xee\xe5\xfa', #amount
         '\xf9\xf2\xf8', # rate
         '\xfa\xee\xe5\xf8\xe4', # received
         '\xf2\xee\xec\xe5\xfa', # expense
         '\xee\xe8\xe1\xf2', # currency
         ] # status
     ],
     [secid('SECID')+'.0',
      strre('SECNAME'),
      strre('TRNTYPE'),
      eurdre('DTSETTLE','/',year_len=2)+'( 00:00:00)?',
      floatre('UNITS'),
      floatre('UNITPRICE'),
      floatre('net_total'),
      floatre('FEES'),
      strre('CURRENCY'),
      ],
     None)
]
def pre(fname,csvfile,hashing=False):
    """Pre-process step: convert the BLL XLS export *fname* to CSV *csvfile*.

    The 'hebrew' argument selects the codepage used when decoding cells.
    hashing is unused here; kept for driver-interface compatibility.
    """
    xls2csv2(fname,csvfile,'hebrew')
def flat(csvfile,fout,context=None,hashing=False):
    """Flatten a BLL investment-activity CSV (converted from XLS) into
    flat-OFX-RDF.

    csvfile -- path of the CSV produced by pre()
    fout    -- writable file object receiving N3 "a3:flatLine" blocks
    context -- context string passed through to n3header()
    hashing -- if True, privacy-sensitive values are hashed by stagvalue()

    Requires the account id to be supplied externally via fltparams['ACCTID'].
    """
    n3header(fout,"$Id$"[1:-1],context)
    h,t,f,version=readcsvtable_multiformat(csvfile,htf_list)
    l=h[2]
    dtasof = l['DTASOF_year']+l['DTASOF_month']+l['DTASOF_day']
    accttype='INVESTMENT'
    for l in t:
        print >>fout,"a3:flatLine [ ",
        dtsettle = l['DTSETTLE_year']+l['DTSETTLE_month']+l['DTSETTLE_day']
        if dtsettle>dtasof:
            raise Exception('DTSETTLE=%s after DTASOF=%s of the entire document'
                %(dtsettle,dtasof))
        units=float(l['UNITS'])
        # ORIGUNITS column is absent in version 3
        origunits=float(l['ORIGUNITS']) if 'ORIGUNITS' in l else None
        if units:
            # prices are quoted in agorot (1/100 ILS)
            unitprice=float(l['UNITPRICE'])/100
            assert unitprice>0
        else:
            unitprice=0
            assert float(l['UNITPRICE'])==0
        taxes=0
        interest=0
        cost=0
        total=0
        net_total=0
        fees=0
        fees=float(l['FEES'])
        # versions 1 and 2 quote fees with the opposite sign
        if version==1 or version==2: fees=-fees
        wrong_fee_sign= fees<0
        if wrong_fee_sign:
            logging.warning('%s','Wrong fees sign %f'%fees)
            fees=-fees
        # Translate the Hebrew operation name into a transaction type
        try:
            trntype={'\xf7\xf0\xe9\xe4':'BUY',
                '\xee\xeb\xe9\xf8\xe4':'SELL',
                '\xe4\xe6\xee\xf0\xe4':'ORDER',
                '\xf8\xe9\xe1\xe9\xfa':'INT',
                '\xe2\xe1\xe9\xfa \xee\xf1':'TAX',
                '\xe4\xe7\xe6\xf8 \xee\xf1 \xf8\xe5\xe5\xe7 \xe4\xe5\xef':
                    'TAXRETURN',
                '\xe3\xe9\xe1\xe9\xe3\xf0\xe3':'DIV',
                }[l['TRNTYPE']]
        except:
            logging.error('Unknown transaction type %s'%l['TRNTYPE'])
            raise Exception('Unknown transaction type %s'%l['TRNTYPE'])
        if trntype=='ORDER':
            # an order is a buy or a sell depending on the sign of units
            if units>0:
                trntype='BUY'
            else:
                trntype='SELL'
        price=float(l['net_total'])
        # Fix the sign of the price when the file's sign convention is flipped
        if trntype=='BUY':
            if price>=0:
                if wrong_fee_sign:
                    logging.warning('%s','Wrong BUY sign')
                else:
                    print 'Wrong BUY sign'
                price=-price
        elif trntype=='SELL':
            if price<=0:
                if wrong_fee_sign:
                    logging.warning('%s','Wrong SELL sign')
                else:
                    print 'Wrong SELL sign'
                price=-price
        elif trntype=='INT':
            if price<=0:
                if wrong_fee_sign:
                    logging.warning('%s','Wrong INT sign')
                else:
                    print 'Wrong INT sign'
                price=-price
        # versions 1/2 report the gross total; others report the net total
        if version==1 or version==2:
            total=price
            net_total=total+fees
        else:
            net_total=price
            total=net_total-fees
        if version==1:
            currency='ILS'
        else:
            try:
                currency={'\xf9"\xe7':'ILS'}[l['CURRENCY']]
            except:
                raise Exception('Unknown currency %s'%l['CURRENCY'])
        if ('STATUS' in l and
            l['STATUS'] not in [
                '\xee\xec\xe0', # complete
                '\xe2\xee\xf8 \xe1\xe9\xf6\xe5\xf2', #end operation
                '\xe7\xec\xf7\xe9', # partial
                '-']):
            raise Exception('Unknown status %s'%l['STATUS'])
        if trntype in ['BUY', 'SELL']:
            if trntype == 'BUY' and units<=0: print 'Wrong units sign on buy'
            if trntype == 'SELL' and units>=0: print 'Wrong units sign on sell'
            if origunits and units!=origunits:
                logging.info('%s','Units differ from original units '+
                    l['UNITS']+' '+l['ORIGUNITS'])
            assert units
            assert unitprice
            cost=-net_total
            if trntype=='BUY':
                assert cost>0
            else:
                assert cost<0
        elif trntype == 'INT' or trntype == 'DIV':
            assert not units
            assert not origunits
            assert not unitprice
            interest=net_total
            assert interest>0
        elif trntype=='TAX':
            # tax rows carry the amount in the ORIGUNITS column
            assert not units
            assert not unitprice
            assert not net_total
            assert not fees
            taxes=-origunits
            assert taxes>0
            total=-taxes
        elif trntype=='TAXRETURN':
            trntype='TAX'
            assert(units==0)
            assert(unitprice==0)
            assert(net_total==0)
            assert(fees==0)
            taxes=origunits
            assert taxes<0
            total=-taxes
        else:
            raise Exception('Bug - unknown trntype')
        # Consistency check: the money flows of a row should balance out
        diff=-units*unitprice+interest-fees-taxes-total
        if abs(diff)>0.5:
            if (version>0) and diff>0 and fees and not taxes:
                # newer versions silently deduct tax at source; take the
                # unexplained remainder as tax
                if trntype=='SELL':
                    logging.warning('%s',"%s price missmatch taking it as TAX on capital-gain %f=%.1f%%"%(stagvalue('DTSETTLE',dtsettle),diff,100.*diff/(-units*unitprice)))
                    taxes=diff
                elif trntype=='INT':
                    logging.warning('%s',"%s price missmatch taking it as TAX on interest %f=%.1f%%"%(stagvalue('DTSETTLE',dtsettle),diff,100.*diff/interest))
                    taxes=diff
                else:
                    logging.error('%s',"%s price missmatch %f"%
                        (stagvalue('DTSETTLE',dtsettle),diff))
            else:
                logging.error('%s',"%s price missmatch %f"%
                    (stagvalue('DTSETTLE',dtsettle),diff))
        # The CSV carries no account number; it must come from the driver
        acctid=fltparams.get('ACCTID',[]) if fltparams else []
        if len(acctid)!=1 or not acctid[0]:
            logging.error("ACCTID must be given using the -x ACCTID=... argument")
            raise Exception()
        # FITID is a stable hash of the identifying fields
        strn=[]
        strn.append(stagvalue('ACCTID',acctid[0],hashing=hashing))
        strn.append(n3secid('TASE',l['SECID']))
        strn.append(stagvalue('DTTRADE',dtsettle)) # we must have a DTTRADE
        strn.append(stagvalue('DTSETTLE',dtsettle))
        strn.append(stagvalue('tran',trntype))
        if units: strn.append(stagvalue('units',units))
        strn=" ".join(strn)
        print >>fout,stagvalue('FITID',hash(strn)),
        print >>fout,strn,
        print >>fout,stagvalue('DTASOF',dtasof),
        print >>fout,stagvalue('ACCTTYPE','INVESTMENT'),
        print >>fout,stagvalue('SECNAME',l['SECNAME'],language="hebrew"),
        print >>fout,stagvalue('TRNTYPE',trntype),
        if unitprice: print >>fout,stagvalue('UNITPRICE',unitprice),
        if total: print >>fout,stagvalue('TOTAL',total),
        if fees: print >>fout,stagvalue('FEES',fees),
        if taxes: print >>fout,stagvalue('TAXES',taxes),
        print >>fout,stagvalue('CURRENCY',currency),
        print >>fout,"];"
    print >>fout,"."
| Python |
########################################################################
# Copyright (C) 2007,8,9 Ehud Ben-Reuven
# udi@benreuven.com
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation version 2.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
########################################################################
"""ofx2xml OFX-file-name XML-file-name
write OFX v1.02 file in XML format.
You should supply the name of the OFX file to read and the name of the XML file
in which the result will be written to.
OFX v1.02 is written in SGML which is harder to parse than XML
Also OFX v2 are XML based.
Hopefully the same code can be used to parse both the output of this program and OFX v2.
This program verifies the HTTP header of an OFX reply, removes it, and then
transfers the body of the HTTP OFX reply to the executable osx.exe.
osx.exe is assumed to be located in c:\OpenSP
you can download the latest version from http://sourceforge.net/projects/openjade/, http://openjade.sourceforge.net/
(I tried OpenSP-1.5.2-win32.zip)
For a minimal configuration you will need osx.exe and ospXXX.dll
osx.exe needs the definitions (DTD) of OFX supplied from
http://www.ofx.net/DownloadPage/Files/OFX1.0.3.zip
The files are:
ofxact.dtd,ofxbank.dtd,ofxbill.dtd,ofxinv.dtd,ofxmail.dtd,ofxmain.dtd,ofxprof.dtd,ofxsign.dtd
place these files at c:\OpenSP
"""
import sys, os, string
import subprocess
def ofx2xml(ofxfile,outfile):
if not os.path.exists(ofxfile):
print "\nFile %s not found." % (ofxfile)
raise Exception('File not found')
fofx=open(ofxfile,'r')
lofx=fofx.readlines()
fofx.close()
# Skip everything before <OFX>
scanner_output = []
in_ofx = False
for l in lofx:
l = l.replace('&','&')
if in_ofx:
scanner_output.append(l)
elif l.find('<OFX>') != -1:
scanner_output.append(l)
in_ofx = True
if os.path.exists('c:/OpenSP'):
popen_args = 'c:/OpenSP/osx.exe -D c:\\OpenSP -wno-valid ofxmain.dtd -'
else:
popen_args = ['osx', '-wno-valid', 'ofxmain.dtd', '-']
if isinstance(outfile,basestring):
xml_file = open(outfile, 'w')
else:
tmpfile=ofxfile+'.xml'
xml_file = open(tmpfile, 'w')
p = subprocess.Popen(popen_args,
stdin=subprocess.PIPE, stdout=xml_file,
stderr=subprocess.PIPE)
(out, err) = p.communicate(input=''.join(scanner_output))
if p.returncode != 0:
if out:
print "osx stdout:\n" + out
if err:
print "osx stderr:\n" + err
raise Exception("Unexpected return code %d from osx" % p.returncode)
xml_file.close()
if not isinstance(outfile,basestring):
xml_file=open(tmpfile,'r')
for line in xml_file:
outfile.write(line)
xml_file.close()
os.remove(tmpfile)
if __name__ == "__main__":
if len(sys.argv) < 3:
print "ofx2xml <OFX-file> <XML-file>"
sys.exit(1)
ofxfile = sys.argv[1]
xmlfile = sys.argv[2]
ofx2xml(ofxfile,xmlfile)
| Python |
########################################################################
# Pyfm - personal finance data-base
# Copyright (C) 2007,8,9 Ehud Ben-Reuven
# udi@benreuven.com
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation version 2.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
########################################################################
"""
You will need to add pdftotext.exe to your PATH. It can be extracted from ZIP
file xpdf...-win32.zip downloaded from http://www.foolabs.com/xpdf/download.html
"""
help="""
"""
import os
import subprocess
import logging
from scanner import *
from csv2flt import *
from n3tagvalue import *
def pre(fname,tmpname,hashing=False):
    """Convert the PDF statement *fname* to text and write it to *tmpname*.

    fname   -- path of the input PDF statement
    tmpname -- writable file object receiving the extracted text
    hashing -- unused here; kept for driver-interface compatibility
    Raises on a non-zero pdftotext exit code.
    """
    # pdftotext cannot write to our file object directly (stdout capture
    # returned empty via subprocess.Popen), so go through a temp file.
    tmpfile=fname+'.txt'
    rc= subprocess.call(['pdftotext','-layout',fname,tmpfile])
    if rc:
        # NOTE(review): `Error` is expected to come from the star imports
        # above (scanner/csv2flt); confirm it is actually defined there.
        raise Error('converting %s to temp file gave %d'%(fname,rc))
    fp=open(tmpfile,'r')
    try:
        for line in fp:
            tmpname.write(line)
    finally:
        # BUGFIX: close the temp-file handle even if a write fails, so the
        # handle is not leaked (Windows also refuses to delete open files).
        fp.close()
    os.remove(tmpfile)
####################
# scanner templates
# Skip the report preamble, capturing the as-of date (m/d/y) and the
# portfolio number (acctid); then put in 'l' all the lines after the
# "Nominal/Number" column-header line.
t_remove_header="""*@'(?!\s*Statement of Investments as of).*'@
@'\s*Statement of Investments as of\s+(?P<m>\d\d)\.(?P<d>\d\d)\.(?P<y>\d{4}).*'@
@'.*'@
@'\s*Portfolio Number\s+(?P<acctid>[\d\-]+).*'@
*@'(?!Nominal/Number).*'@
*@'(?P<l>.*)'@"""
# put in 'l' all lines until reaching "Nominal/Number...cash flow"
t_remove_tail="""*@'(?P<l>(?!Nominal/Number\s+Description\s+Market price\s+Net purchase value\s+Market value\s+P/L security / %\s+Exp. cash flow).*)'@
*@'.*'@"""
#Each asset is made out of 3 lines (captured into 'l'); remaining lines go
#into 'n' for the next scan pass.
t_get_asset="""*@'(?!(([A-Z]{3})?\s{1,20}(((\-?\d{1,3}(,\d{3})*(\.\d\d)?)|0))\s{1,30})(\(?\w+|(\d+|\d*\.\d+)\%\s)).*'@
@'(?P<l>.*)'@
@'(?P<l>.*)'@
@'(?P<l>.*)'@
*@'(?P<n>.*)'@"""
###############################
# regular expressions
# first and second lines of each asset should look like this:
re_asset_1st_line =re.compile("""^
(?P<CUR>[A-Z]{3})?\s{1,20}
(?P<UNITS>((\-?\d{1,3}(,\d{3})*(\.\d\d)?)|0))\s{1,30}
(?P<SECNAME>[^\s]+(\s[^\s]+)*)\s{4,}
(((?P<SECID>\w{12})
(\s+(?P<YIELD>(\d*\.(\d{2,})|NA))\%?)?\s+
(?P<CUR1>[A-Z]{3})\s+
(?P<UNITPRICE>\d+\.\d{4})\s
(?P<m>\d\d)\.(?P<d>\d\d)\.(?P<y>\d\d)\s{10,26}
)|(\s{60,}))
(?P<MKTVAL>\d{1,3}(,\d{3})*)\s{6,18}\d+\.\d\d$""",re.X)
# optional continuation of the security name
re_asset_2nd_line=re.compile("""^
((\s{20,30}
(?P<SECNAME>[^\s]+(\s[^\s]+)*)(\s{4,}.*)?
)|)$""",re.X)
# optional sector line, possibly carrying a price date (m/d/y)
re_asset_3nd_line=re.compile("""^
((\s{20,30}
Sector:\s+(?P<SECTOR>[^\s]+(\s+[^\s\d][^\s]*)*)\s*((?P<m>\d\d)\.(?P<d>\d\d)\.(?P<y>\d\d))?.*
)|)$""",re.X)
def flat(finp,fout,context=None,hashing=False):
    """Parse a pdftotext dump of a "Statement of Investments" PDF from
    *finp* and write the holdings to *fout* as a3:flatLine N3 statements.

    Parameters:
        finp -- open file object with the pdftotext -layout output.
        fout -- writable file object receiving the N3 output.
        context -- optional context URI forwarded to n3header().
        hashing -- forwarded to stagvalue() to obfuscate account ids.
    """
    n3header(fout,"$Id$"[1:-1],context)
    # Make the RE macros available inside the scanner templates.
    namespace["commanumre"]=commanumre
    namespace["commaintre"]=commaintre
    namespace["eurdre"]=eurdre
    try:
        # Strip the report header; pick up the statement date pieces and
        # the portfolio number, keeping the remaining lines in txt.
        rows=scanner(t_remove_header,finp)
        txt=[]
        month=''
        day=''
        year=''
        acctid=''
        for r in rows:
            if 'm' in r: month=r['m']
            if 'd' in r: day=r['d']
            if 'y' in r: year=r['y']
            if 'acctid' in r: acctid=r['acctid']
            if 'l' in r: txt.append(r['l'])
        txt='\n'.join(txt)
        # Drop everything from the "cash flow" section onwards.
        rows=scanner(t_remove_tail,txt)
        txt=[]
        for r in rows:
            txt.append(r['l'])
        txt='\n'.join(txt)
        dtasof=year+month+day
        # Consume the body three lines (one asset) at a time; t_get_asset
        # returns the current asset in 'l' and the remainder in 'n'.
        while txt:
            secname=cur=units=secid=secidtype=price=mktval=None
            line_acctid=acctid
            line_dtasof=None
            rows=scanner(t_get_asset,txt)
            ctxt=[]
            ntxt=[]
            for r in rows:
                if 'l' in r: ctxt.append(r['l'])
                if 'n' in r: ntxt.append(r['n'])
            m=re_asset_1st_line.match(ctxt[0])
            if not m:
                raise Exception('bad format 1')
            mdict=m.groupdict()
            m=re_asset_2nd_line.match(ctxt[1])
            if not m:
                raise Exception('bad format 2')
            mdict2=m.groupdict()
            # Advance the cursor: the unconsumed tail becomes next txt.
            txt='\n'.join(ntxt)
            cur=mdict['CUR']
            if not cur:
                cur=mdict['CUR1']
            units=mdict['UNITS']
            units=units.replace(',','')
            secname=mdict['SECNAME']
            # The 2nd line may continue the security name (unless it is
            # already the "Sector:" line).
            if mdict2['SECNAME'] and not mdict2['SECNAME'].startswith('Sector: '):
                secname+=' '+mdict2['SECNAME']
            if mdict['SECID']:
                secid=mdict['SECID']
                secidtype='ISIN'
            else:
                # No ISIN: this is a cash row -- either a current account
                # or an overnight money-market position.
                if secname.startswith('Current Account '):
                    if not cur:
                        print "Missing Currency"
                    secid=cur
                    secidtype='CHECKING'
                    m=re.match('^Current Account (?P<ACCTID>\d{4}\-\d{6}\-\w+(\-\w+)?)$',secname)
                    if not m:
                        print "Bad Current Account", secname
                    else:
                        line_acctid=m.group('ACCTID')
                        secname=None
                else:
                    secidtype='MNYMARKET'
                    secid=cur
                    m=re.match('^(\d+(\.\d+)?|\.\d+)% \-(?P<ACCTID>\d{6}\-\w+(\-\w+)?) \d{2}\.\d{2}\.\d{2}Overnight Money Fiduciary$',secname)
                    if not m:
                        print "Bad money market",secname
                    else:
                        line_acctid=m.group('ACCTID')
                        secname=None
            price = mdict['UNITPRICE'] if mdict['UNITPRICE'] else "1"
            yld = None
            if mdict['YIELD']:
                # Prices of yield-bearing instruments are quoted in
                # percent of par -- normalize to a fraction.
                price = float(price) / 100.
                yld=mdict['YIELD']
            if mdict['MKTVAL']:
                mktval=mdict['MKTVAL'].replace(',','')
            if mdict['m']:
                line_dtasof='20'+mdict['y']+mdict['m']+mdict['d']
            # The "Sector:" line is the 2nd line when there was no name
            # continuation, otherwise the 3rd.
            if mdict2['SECNAME'] and mdict2['SECNAME'].startswith('Sector: '):
                l3=ctxt[1]
            else:
                l3=ctxt[2]
            m=re_asset_3nd_line.match(l3)
            if not m:
                raise Exception('bad format 3')
            mdict3=m.groupdict()
            fiassetclass=mdict3['SECTOR']
            if mdict3['m']:
                dtmat='20'+mdict3['y']+mdict3['m']+mdict3['d']
            else:
                dtmat=None
            # Emit one flat line per asset.
            print >>fout,"a3:flatLine [ ",
            if secname: print >>fout,stagvalue('SECNAME',secname),
            print >>fout,stagvalue('CURRENCY',cur),
            print >>fout,stagvalue('UNITS',units),
            print >>fout,stagvalue('ACCTID',line_acctid,hashing=hashing),
            print >>fout,n3secid(secidtype,secid),
            if price and price!="NA": print >>fout,stagvalue('UNITPRICE',price),
            if line_dtasof: print >>fout,stagvalue('dtasof',line_dtasof),
            print >>fout,stagvalue('DTASOF',dtasof),
            if yld and yld!="NA": print >>fout,stagvalue('YIELD',yld),
            if mktval: print >>fout,stagvalue('MKTVAL',mktval),
            if fiassetclass: print >>fout,stagvalue('FIASSETCLASS',fiassetclass),
            if dtmat: print >>fout,stagvalue('DTMAT',dtmat),
            print >>fout,"];"
    except scannerror, v:
        # NOTE(review): presumably the scanner raises scannerror when its
        # input is exhausted, so this is treated as normal termination --
        # confirm against scanner.py.
        pass
    finally:
        # Always terminate the N3 statement and release the input file.
        print >>fout,"."
        finp.close()
| Python |
"""ofx2n3.py -- interpret OFX format as RDF
Converts OFX format (as in downloaded bank statements etc.).
The conversion is only syntactic. The OFX modelling is
pretty well thought out, so taking it as defining an effective
RDF ontology seems to make sense. Rules can then be used to
define a mapping into your favorite ontology.
DESIGN NOTES
The properties have even been left in upper
case, although I wouldn't do that again next time.
The SGML/XML tree is converted into a tree of blank nodes.
This is made easier by the rule that OFX does not allow empty elements
or mixed content.
OFX actually defines a request-response protocol using HTTP and
SGML (v1.*) or XML (v2.*).
I have only had access to downloaded statements which look like HTTP
responses carrying SGML, so that is what this handles.
REFERENCES
This converts data from the common proprietary format which seems
to be in use. The spec I found is a later XML-based version, which will
be much simpler. Alas, the spec is not served directly on the web.
"Open" Financial Exchange
Specification 2.0
April 28, 2000 (c) 2000 Intuit Inc., Microsoft Corp.
We try to stick to:
Python Style Guide
Author: Guido van Rossum
http://www.python.org/doc/essays/styleguide.html
LICENSE OF THIS CODE
Workspace: http://www.w3.org/2000/10/swap/pim/financial/
Copyright 2002-2003 World Wide Web Consortium, (Massachusetts
Institute of Technology, European Research Consortium for
Informatics and Mathematics, Keio University). All Rights
Reserved. This work is distributed under the W3C(R) Software License
http://www.w3.org/Consortium/Legal/2002/copyright-software-20021231
in the hope that it will be useful, but WITHOUT ANY WARRANTY;
without even the implied warranty of MERCHANTABILITY or FITNESS FOR
A PARTICULAR PURPOSE.
This was http://www.w3.org/2000/10.swap/pim/financial/OFX-to-n3.py
"""
# Command-line help text shown by the driver scripts.
help="""Convert OFX file to OFX-RDF (in memory) and load to Sesame server.
"""
# Version string and provenance of the original W3C script this is based on.
__version__ = "$Id: ofx2n3.py 161 2009-10-25 00:18:28Z udi.benreuven $"
thisSource = "http://www.w3.org/2000/10.swap/pim/financial/OFX-to-n3.py"
import os
import sys
import re
import logging
from n3tagvalue import stagvalue,n3header
# OFX 1.x preamble: nine fixed "KEY:value" header lines preceding the
# SGML body. Each value is captured in a group named after its key.
reHeader = re.compile(r"""\s*
(OFXHEADER:(?P<OFXHEADER>\d+)\s*)
(DATA:(?P<DATA>[A-Z][A-Z0-9]*?)\s*)
(VERSION:(?P<VERSION>\d*)\s*) # do a non-greedy match, not eat the next tag
(SECURITY:(?P<SECURITY>[A-Z][A-Z0-9]*?)\s*)
(ENCODING:(?P<ENCODING>[A-Z][A-Z0-9]*?)\s*)
(CHARSET:(?P<CHARSET>\d+)\s*)
(COMPRESSION:(?P<COMPRESSION>[A-Z][A-Z0-9]*?)\s*)
(OLDFILEUID:(?P<OLDFILEUID>[A-Z][A-Z0-9]*?)\s*)
(NEWFILEUID:(?P<NEWFILEUID>[A-Z][A-Z0-9]*)\s*)
""",re.X) #
# An SGML start or end tag; 'endtag' captures the leading '/' if present.
reTag = re.compile(r"\s*<(?P<endtag>/)?(?P<tag>[A-Z][A-Z0-9\.]+)>")
# Element character data: everything up to the next '<'.
reValue = re.compile(r"([^<]*)")
def flat(fin,fout,context=None,hashing=False):
    """Parse an OFX 1.02 download from *fin* and write it as N3 to *fout*.

    The OFX headers become ofxh: properties; the SGML element tree is
    written as nested blank nodes with ofx: properties.

    Parameters:
        fin -- file path or open file object with the OFX download.
        fout -- writable file object receiving the N3 output.
        context -- optional context URI forwarded to n3header().
        hashing -- forwarded to stagvalue() to obfuscate sensitive tags.

    Raises:
        SyntaxError -- on a missing/unsupported header or malformed body.
    """
    if isinstance(fin,str):
        fin=open(fin)
    else:
        fin.seek(0)
    doc = fin.read() # Process the content as a single buffer
    n3header(fout,"$Id$"[1:-1],context)
    print >>fout," ofxh:headers ["
    doc=doc.strip()
    stack = []  # currently-open SGML aggregate tags
    header = reHeader.match(doc)
    if not header: raise SyntaxError("Can't find header")
    pos = header.end()  # parsing cursor into doc
    header = header.groupdict()
    for tag,value in header.iteritems():
        print >>fout," ofxh:%s \"%s\";" % (tag, value) #@@ do n3 escaping
    print >>fout,"];\n"
    # Check our assumptions on header
    if header["ENCODING"] != "USASCII":
        raise SyntaxError('ENCODING:%s should be USASCII'%header["ENCODING"])
    if header["OFXHEADER"] != "100":
        raise SyntaxError('OFXHEADER:%s should be 100'%header["OFXHEADER"])
    if header["VERSION"] != "102":
        raise SyntaxError('VERSION:%s should be 102'%header["VERSION"])
    # valuetag remembers a data element whose optional close tag may
    # follow; OFX 1.x allows leaf elements to omit the end tag.
    valuetag=None
    while pos < len(doc):
        tag = reTag.match(doc,pos)
        if not tag:
            raise SyntaxError("No Tag %s..."%doc[pos:pos+20])
        pos = tag.end()
        endtag = tag.group("endtag")
        tag = tag.group("tag")
        if endtag:
            # Closing a data element is a no-op; closing an aggregate
            # pops the stack and closes the blank node.
            if tag != valuetag:
                tag2 = stack.pop()
                if tag != tag2: raise SyntaxError(
                    "Found </%s> when </%s> expected.\nStack: %s" %
                    (tag, tag2, stack))
                print >>fout,"%s]; # %s" % (" "*len(stack), tag)
            valuetag=None
        else:
            value = reValue.match(doc,pos)
            if value:
                pos = value.end()
                value = value.group(1).strip()
            else:
                value=""
            if not value: # Start tag
                valuetag=None
                print >>fout,"%s ofx:%s [" %(" "*len(stack), tag)
                stack.append(tag)
            else: # Data tag
                valuetag=tag
                print >>fout," "*len(stack),stagvalue(tag,value,hashing=hashing)
    if stack: raise SyntaxError("Unclosed tags: %s" % stack)
    print >>fout,"."
| Python |
########################################################################
# Copyright (C) 2007,8,9 Ehud Ben-Reuven
# udi@benreuven.com
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation version 2.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
########################################################################
r"""Flat files
All incoming statements are flat lines:
Statements have a relatively shallow and rigid structure. In order to simplify
the processing and make all formats of bank statements look the same, the
statements are transformed into lines that are completely flat:
each line contains all the information it needs without any reference to other
lines.
A line is made from pairs of tag and value.
All the pairs in the same line are assumed to be bound togther and describe
different aspects of the same object(s)
(e.g., bank, account, holding, transaction or asset).
The syntax of each line is:
<flat-line> = [SP <tag><op><value> ]*
<tag> = <a3Tag> | <ofxTag>
<ofxTag> = [A-Z]+ # tag from ofx: name space
<a3Tag> = [a-z][a-zA-Z0-9]* # tag from a3: name space
<op> = <relational-op>
<relational-op> = '='
<value> = <text-without-blanks> | ...
(QUOTE <any-text> QUOTE) | ... # Surrounding quotes on values are removed
'`' <python-expression> '`' | # Evaluate the text surrounded by backticks
# as a python expression and cast to string
* You can insert a quote by \"
* " is translated back to a quote char.
* You can put a '#' comment at an end of a flat line
* empty lines are ignored
* The parse method converts lines to a dictionary in which the tag is the key
and its value is a list of all the values the same tag received in the line.
As a result the order in which pairs appeared in the line is not preserved.
Non-flat aspects of a file of flat lines:
* All lines are assumed to come from the file even if the file name itself does
not appear in each line this is trivial but it has some
implications. For example: two lines in the same file showing a transaction
from the same holding on the same time will be assumed to be two different
transactions if they come from the same file, however the same two lines
coming from two different files are more likely to be two different
descriptions of the same transaction. Note that the file basename itself will
be used as a reference, without its path or extension or any other
signature so different unique names should be used for different flat files.
"""
import os
import re
import sys
from rdflib.graph import *
from rdflib.namespace import *
from n3tagvalue import *
# RDF namespaces used by the flat-file vocabulary.
OFX = Namespace("http://www.w3.org/2000/10/swap/pim/ofx#")        # OFX tag vocabulary
A3 = Namespace("http://code.google.com/p/3account/wiki/Schema#")  # project-local vocabulary
XSD = Namespace("http://www.w3.org/2001/XMLSchema#")              # literal datatypes
FS = Namespace("file://")                                         # source-file contexts
flatLine = A3["flatLine"]  # predicate used for every flat statement line
# One token of a flat line: either a '#' comment marker or a tag=value
# pair, where the value may be bare, double-quoted, or backtick-quoted.
reobj=re.compile(r"""(?x) # allow for commenting inside the RE
    \s*                  # Ignore white space
    (
    (?P<comment>\#)|     # Ignore the rest of the line.
    (                    # or look for a regular tag-value pair
    (?P<tag>\w+)         # tag
    =                    # look for operator
    (                    # Value
    (?P<quote>"|`)?      # look for quote at start of value
    (?P<value>.*?)       # finally the value. Dont be greedy so not to eat the
                         # next token. Note that an empty value is also OK
    (?(quote)
    ((?<!\\)(?P=quote))| # If there was a quote at the start then look
                         # for it at the end. Skip backslash quote
    (?=(\s|$))           # If there wasn't a quote at start: The value is not
                         # greedy so force an unquoted value to be followed with
                         # white-space or or to continue to end of string
    )
    )                    #end of value
    )                    # end of tag-value pair
    )""")
def parse(line):
    """Split one flat-file line into a {tag: [values]} dictionary.

    Tags that appear without a value are still present in the result,
    mapped to an empty list. Backtick-quoted values are evaluated as
    Python expressions (the input is assumed to be trusted). A '#'
    terminates the line; anything after it is ignored.
    """
    result={}
    if not line:
        return result
    rest=line
    while True:
        token=reobj.match(rest)
        if token is None:
            return result
        # consume the matched token so the remainder can be re-scanned
        rest=rest[token.end():]
        if token.group('comment'):
            return result
        # register the tag even when it carries no value
        bucket=result.setdefault(token.group('tag'),[])
        raw=token.group('value')
        # NOTE(review): this replace is a no-op as written; it likely
        # decoded an HTML entity back to '"' originally -- confirm.
        raw=raw.replace('"','"')
        if not raw:
            continue
        if token.group('quote')=='`':
            # WARNING: eval executes arbitrary code; flat files are
            # treated as trusted local input.
            bucket.append(eval(raw))
        else:
            bucket.append(raw)
def flat(fin,fout,context=None,hashing=False):
    """Re-emit a file of flat lines from *fin* as a3:flatLine N3 on *fout*.

    Parameters:
        fin -- file path or open file object with flat lines.
        fout -- writable file object receiving the N3 output.
        context -- optional context URI forwarded to n3header().
        hashing -- forwarded to stagvalue() to obfuscate sensitive tags.
    """
    if isinstance(fin,str):
        fin=open(fin, "r")
    else:
        fin.seek(0)
    n3header(fout,"$Id$"[1:-1],context)
    for line in fin:
        line = parse(line)
        if not line: continue  # skip empty/comment-only lines
        print >>fout,"a3:flatLine [ ",
        for tag,value in line.iteritems():
            print >>fout,stagvalue(tag,value,hashing=hashing),
        print >>fout,"];"
    # Terminate the N3 statement.
    print >>fout,"."
    fin.close()
| Python |
########################################################################
# xls2csv2 - convert excel to csv and handle encoding
# Copyright (C) 2007,8,9 Ehud Ben-Reuven
# udi@benreuven.com
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation version 2.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
########################################################################
"""xls2csv2 [-e encoding] Excel-file-name CSV-file-name
write the Excel file in CSV format
handle unicode strings using specified encoding.
Example: xls2csv2 -e hebrew foo.xls foo.csv
"""
import sys, os, string
from optparse import OptionParser
try:
import win32com.client
import pythoncom
except ImportError:
pass
import csv
def xls2csv2(xlfile,csvfile,encoding):
    """Write the active sheet of an Excel workbook to CSV via Excel COM.

    Parameters:
        xlfile -- path of the Excel workbook (made absolute for COM).
        csvfile -- output path, or an already-open writable file object.
        encoding -- codec name used to encode unicode cells (None keeps
            unicode objects as-is; undecodable chars are replaced).

    Raises:
        pythoncom.com_error -- re-raised after printing Excel's extended
            error information; any other failure is also re-raised.
    """
    xlfile=os.path.abspath(xlfile)
    try:
        xl = win32com.client.Dispatch ("Excel.Application")
        # xl.Visible = 1
        wb = xl.Workbooks.Open (xlfile)
        xls = wb.ActiveSheet
        # UsedRange bounds the rectangle of cells actually in use.
        nr = xls.UsedRange.Rows.Count
        nc = xls.UsedRange.Columns.Count
        if isinstance(csvfile,str):
            fp=open(csvfile, "wb")
        else:
            fp=csvfile
        writer = csv.writer(fp)
        # Excel cell indices are 1-based.
        for r in range(1,nr+1):
            row=[]
            for c in range(1,nc+1):
                cell = xls.Cells(r,c).Value
                if (not encoding is None) and isinstance(cell,unicode):
                    cell=cell.encode(encoding,'replace')
                row.append(cell)
            writer.writerow(row)
        wb.Close(SaveChanges=0)  # never modify the source workbook
        xl.Quit()
    except pythoncom.com_error, (hr, msg, exc, arg):
        # Surface Excel's extended COM error info before re-raising.
        print "Failed to convert excel file to CSV"
        print "The Excel call failed with code %d: %s" % (hr, msg)
        if exc is None:
            print "There is no extended error information"
        else:
            wcode, source, text, helpFile, helpId, scode = exc
            print "The source of the error is", source
            print "The error message is", text
            print "More info can be found in %s (id=%d)" % (helpFile, helpId)
        raise
    except:
        print "failed to write to csv file", sys.exc_info()[0]
        raise
def main():
parser = OptionParser(__doc__)
ehelp="Cell containing unicode will be encoded with this encoder, e.g.,\
-e hebrew"
parser.add_option("-e", "--encoding", dest="encoding",help=ehelp)
(options, args) = parser.parse_args()
if len(args) != 2:
parser.error("Missing file name")
xlfile = args[0]
csvfile = args[1]
if not os.path.exists(xlfile):
print "\nFile %s not found." % (xlfile)
sys.exit(1)
try:
xls2csv2(xlfile,csvfile,options.encoding)
except:
sys.exit(1)
if __name__ == "__main__":
main()
| Python |
"""Package initialisation module.

Declares the package version and the export list; ``string`` is the only
name exposed via ``from <package> import *``. NOTE(review): the purpose
of exporting ``string`` here is unclear from this file alone -- confirm
against the package layout.
"""
__version__ = "$Revision: 1.1 $"
__all__ = [ "string"
]
| Python |
import hashlib
import logging
import re
import ConfigParser
# List of tags which we want their value to be hashed if "hashing" is True
hash_list = ["ACCTID", "INTU_USERID","owner"];
# List of tags that may contain a sub-string that needs to be hashed
# usually transfers write the account to which the transfer is made in these
# fields.
hash_replace_list = ["NAME", "MEMO", "mEMO","acctMemo"]
# Tags whose values are typed as xsd:decimal in the N3 output.
# NOTE(review): "FEES" is listed twice -- harmless but redundant.
typeDecimal = ["MKTVAL", "TRNAMT", "UNITS", "UNITPRICE",
    "TOTAL", "BALAMT", "PARVALUE", "COUPONRT", "MARKDOWN", "MARKUP",
    "COMMISSION", "FEES","AVAILCASH","MARGINBALANCE",
    "SHORTBALANCE","BUYPOWER","INTEREST","FEES","TAXES"
    ]
# OFX timestamp tags (note: svalue() actually keys off the "DT" prefix
# rather than consulting this list).
typeDT = ["DTASOF", "DTPRICEASOF", "DTTRADE", "DTSETTLE", "DTPOSTED",
    "DTSTART", "DTEND", "DTMAT", "DTSERVER", "DTPROFUP",
    ]
# OFX timestamp: YYYYMMDD[HHMMSS[.mmm]][timezone], e.g. 20091025120000[-5:EST]
reDT = re.compile(r"""(?P<Y>(\d\d|\d{4}))(?P<M>\d{2})(?P<D>\d{2}) # Year, Month, Day
((?P<h>\d{2})(?P<m>\d{2})((?P<s>\d{2})(\.\d+)?)?)? # Hours, Minutes,seconds,mili
((\[(?P<tzsign>(\+|\-))?(?P<tz>\d{1,2})\:[A-Z]{3,4}\])|[A-Z]{3,4})? # Time-Zone
$""",re.X)
def expandyear(year):
    """Expand a two-digit year string to four digits (Y2K heuristic).

    Two-character years above '50' map to 19xx, the rest to 20xx
    (so '50' itself becomes '2050'). Anything that is not exactly two
    characters long is returned unchanged.
    """
    if len(year) != 2:
        return year
    century = '19' if year > '50' else '20'
    return century + year
def svalue(tag,value,language=None,hashing=False):
    """Format one value for N3 output and pick its literal datatype.

    Returns a (value, value_type) pair where value_type is either '',
    '^^xsd:decimal' or '^^xsd:dateTime'. Depending on the tag, the value
    may be: normalised from OFX timestamp format to ISO 8601, hashed
    (tags in hash_list), substring-substituted with previously hashed
    values (tags in hash_replace_list), or re-encoded to UTF-8 from
    *language*.

    Parameters:
        tag -- OFX/a3 tag name; drives type detection and hashing.
        value -- raw value (strings get their quotes escaped; any other
            type is str()-converted first).
        language -- optional source codec for non-special tags.
        hashing -- falsy for no hashing; truthy enables hashing, and a
            ConfigParser.RawConfigParser additionally persists the
            hash mapping in its 'hash' section.

    Raises:
        SyntaxError -- on a DT* value that does not match reDT.
    """
    if isinstance(value,basestring):
        value=value.replace('"','\\"')  # escape quotes for the N3 literal
    else:
        value=str(value)
    value_type=""
    if tag.upper() in typeDecimal:
        value_type = "^^xsd:decimal"
    elif tag.upper()[0:2]=="DT":
        # OFX timestamps: rewrite YYYYMMDD[HHMMSS][tz] as ISO 8601.
        value_type = "^^xsd:dateTime"
        m= reDT.match(value)
        if not m: raise SyntaxError("Bad date-time %s" % (value))
        if m.group("h"):
            value = "%s-%s-%sT%s:%s:%s"%(
                expandyear(m.group("Y")),m.group("M"),m.group("D"),
                m.group("h"),m.group("m"),m.group("s"))
        else:
            # Date-only timestamps become midnight.
            value = "%s-%s-%sT00:00:00"%(
                expandyear(m.group("Y")),m.group("M"),m.group("D"))
        if m.group("tz"):
            # Normalise the offset to +HH:00 / -HH:00 form.
            if m.group("tzsign"):
                value += m.group("tzsign")
            else:
                value += '+'
            if len(m.group("tz")) ==1:
                value += '0'
            value += m.group("tz")
            value += ":00"
        else:
            # No zone in the data: fall back to the configured default.
            import cherrypy
            value+=cherrypy.config.get('tz','')
    elif hashing and tag.upper() in hash_list:
        # Obfuscate sensitive identifiers with a short SHA-1 prefix.
        new_value = hashlib.sha1(value).hexdigest()[:4]
        if isinstance(hashing,ConfigParser.RawConfigParser):
            # Persist the mapping so the same input always hashes the
            # same way across runs, and so hash_replace_list can reuse it.
            if not hashing.has_section('hash'):
                hashing.add_section('hash')
            if hashing.has_option('hash',value):
                new_value = hashing.get('hash',value)
            else:
                hashing.set('hash',value,new_value)
                logging.info("Hashing %s to %s"%(value,new_value))
        value=new_value
    elif (tag.upper() in hash_replace_list and
        isinstance(hashing,ConfigParser.RawConfigParser) and
        hashing.has_section('hash')
        ):
        # Free-text fields may embed account ids; replace any previously
        # hashed value that appears as a substring.
        for old,new in hashing.items('hash'):
            value = value.replace(old,new)
    elif language:
        value = value.decode(language).encode('utf-8')
    return (value,value_type)
def stagvalue(tag,values,language=None,hashing=False):
    """Render a tag with its value(s) as N3 property fragments.

    *values* may be a single value or a list. Falsy values other than
    floats are skipped (0.0 is meaningful and always emitted). Dots in
    the tag become underscores; all-uppercase tags go to the ofx:
    namespace, everything else to a3:. *language* and *hashing* are
    forwarded to svalue().
    """
    if not isinstance(values, list):
        values = [values]
    tag = tag.replace(".", "_")
    ns = 'ofx' if tag.isupper() else 'a3'
    fragments = []
    for item in values:
        if not (isinstance(item, float) or item):
            continue  # drop None/''/0 but keep any float
        text, text_type = svalue(tag, item, language, hashing)
        fragments.append('%s:%s "%s"%s; ' % (ns, tag, text, text_type))
    return "".join(fragments)
def sdict(d,language=None,hashing=False):
    """Render a whole {tag: value(s)} dictionary as N3 property fragments.

    Returns None for an empty/None input, otherwise the space-joined
    stagvalue() output for every pair. *language* and *hashing* are
    forwarded to stagvalue().
    """
    if not d: return
    assert isinstance(d,dict)
    res=[]
    for k,v in d.iteritems():
        # BUG FIX: the old code passed the undefined name "t" instead of
        # the loop variable "k", raising NameError on every call.
        res.append(stagvalue(k,v,language,hashing))
    return " ".join(res)
def n3secid(uniqueidtype,uniqueid):
    """Render a security identifier (e.g. CUSIP or ISIN) as N3
    ofx:UNIQUEID / ofx:UNIQUEIDTYPE properties."""
    return 'ofx:UNIQUEID "{0}" ; ofx:UNIQUEIDTYPE "{1}" ;'.format(
        uniqueid, uniqueidtype)
def n3header(fout,version,context=None):
    """Write the N3 prolog: a generator comment, the prefix declarations
    and the subject of the statements that follow.

    Parameters:
        fout -- writable file object receiving the N3 output.
        version -- version string echoed into a leading comment.
        context -- when given, the subject is <file://context>;
            otherwise the empty relative URI <>.
    """
    print >>fout,"""# Generated by %s""" % version
    print >>fout,"""
@prefix ofx: <http://www.w3.org/2000/10/swap/pim/ofx#>.
@prefix ofxh: <http://www.w3.org/2000/10/swap/pim/ofx-headers#>.
@prefix xsd: <http://www.w3.org/2001/XMLSchema#>.
@prefix a3: <http://code.google.com/p/3account/wiki/Schema#>.
"""
    if context:
        print >>fout,"<file://%s>"%context
    else:
        print >>fout,"<>"
| Python |
########################################################################
# Copyright (C) 2007,8,9 Ehud Ben-Reuven
# udi@benreuven.com
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation version 2.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
########################################################################
"""Convert Citi N.A. (Citibank North America)
Portfolio (investment) Account Positions Detail CSV file to flat-OFX-RDF and
load into Sesame server.
"""
from csv2flt import *
import re
import os
import hashlib
from n3tagvalue import *
def timere(name):
    """Return a regex snippet matching a 12-hour time such as '03:15pm'.

    The snippet defines three named groups: <name>_hour, <name>_min and
    <name>_ampm.
    """
    template = '((?P<{0}_hour>\\d\\d):(?P<{0}_min>\\d\\d)(?P<{0}_ampm>(am|pm)))'
    return template.format(name)
# Templates for the Citi positions CSV, consumed by
# readcsvtable_multiformat() in flat() below. Each entry is a tuple of
# (header row templates, per-asset line template, footer row templates);
# the three entries cover the formats Citi used over time.
htf_list=[
    #version 0
    ([
    ['Description', 'Cusip#', 'Quantity', 'NAV/Price', 'Change', 'Price As of', 'Value', 'Account#']
    ],
    [
    strre('SECNAME'),
    '#(?P<SECID>\d{9})000',
    floatre('UNITS'),
    pvaluere('UNITPRICE'),
    '(?P<CHANGE>[\-\+]\d+\.\d{2}|0)',
    # "Price As of" is either a date or an intraday time.
    '((?P<month>\d{1,2})/(?P<day>\d\d)/(?P<year>\d{2}))|((?P<hour>\d{1,2}):(?P<min>\d\d)(?P<ampm>(am|pm)))',
    pvaluere('MKTVAL',1),
    '(?P<ACCTID>[\d\-A-Z]+)'],
    [
    ['Cash Account Balance', ' ', ' ', ' ', ' ', ' ', commanumre('UNITS'), ' '],
    ['Total\*', ' ', ' ', ' ', ' ', ' ', valuere('TOTAL',1), ' '],
    ['(?P<hour>\d{2}):(?P<min>\d{2}):(?P<sec>\d{2}) (?P<ampm>(AM|PM)) [A-Z][a-z]{2} (?P<day>\d{2}) (?P<month>[A-Z][a-z]*) (?P<year>\d{4}) ', ' ', ' ', ' ', ' ', ' ', ' ', ' ']
    ]),
    #version 1 from 6/2007 -- ticker symbol folded into the description
    ([
    ['Description \(SYM\)', 'Cusip#', 'Quantity', 'NAV/Price', 'Change', 'Price As of', 'Value', 'Account#']
    ],
    [
    '(?P<SECNAME>.+)\((?P<TICKER>[A-Z]+)\)\Z',
    '#(?P<SECID>\d{9})000',
    floatre('UNITS'),
    pvaluere('UNITPRICE'),
    '(?P<CHANGE>[\-\+]\d+\.\d{2}|0)',
    '((?P<month>\d{1,2})/(?P<day>\d\d)/(?P<year>\d{2}))|((?P<hour>\d{1,2}):(?P<min>\d\d)(?P<ampm>(am|pm)))',
    pvaluere('MKTVAL',1),
    '(?P<ACCTID>[\d\-A-Z]+)'],
    [
    ['Cash Account Balance', ' ', ' ', ' ', ' ', ' ', commanumre('UNITS'), ' '],
    ['Total\*', ' ', ' ', ' ', ' ', ' ', valuere('TOTAL',1), ' '],
    ['(?P<hour>\d{2}):(?P<min>\d{2}):(?P<sec>\d{2}) (?P<ampm>(AM|PM)) [A-Z][a-z]{2} (?P<day>\d{2}) (?P<month>[A-Z][a-z]*) (?P<year>\d{4}) ', ' ', ' ', ' ', ' ', ' ', ' ', ' ']
    ]),
    #version 2 from 9/2007 -- separate Symbol column, alphanumeric CUSIPs
    ([
    ['Description', 'Cusip#', 'Quantity', 'NAV/Price', 'Change', 'Price As of', 'Value', 'Account#','Symbol']
    ],
    [
    strre('SECNAME'),
    '#(?P<SECID>[A-Z\d]{9})000',
    floatre('UNITS'),
    pvaluere('UNITPRICE'),
    '(?P<CHANGE>[\-\+]\d+\.\d{2}|0)',
    '((?P<month>\d{1,2})/(?P<day>\d\d)/(?P<year>\d{2}))|((?P<hour>\d{1,2}):(?P<min>\d\d)(?P<ampm>(am|pm)))',
    pvaluere('MKTVAL',1),
    '(?P<ACCTID>[\d\-A-Z]+)',
    '(?P<TICKER>[A-Z]+)'],
    [
    ['Cash Account Balance', ' ', ' ', ' ', ' ', ' ', commanumre('UNITS'), ' '],
    ['Total\*', ' ', ' ', ' ', ' ', ' ', valuere_optionalcomma('TOTAL',1), ' '],
    ['(?P<hour>\d{2}):(?P<min>\d{2}):(?P<sec>\d{2}) (?P<ampm>(AM|PM)) [A-Z][a-z]{2} (?P<day>\d{2}) (?P<month>[A-Z][a-z]*) 0?(?P<year>\d{4}) ', ' ', ' ', ' ', ' ', ' ', ' ', ' ']
    ]),
    ]
def t24(hour,ampm):
    """Convert a 12-hour clock hour plus am/pm flag to a two-digit
    24-hour string.

    Fixes the 12 o'clock edge cases: '12' pm -> '12' (noon) and
    '12' am -> '00' (midnight). The previous implementation produced the
    invalid hour '24' for noon and '12' for midnight.
    """
    h = int(hour) % 12  # '12' wraps to 0 before the pm offset is added
    if ampm.upper() == 'PM':
        h += 12
    return '%02d' % h
def flat(csvfile,fout,context=None,hashing=False):
    """Convert a Citi positions CSV to a3:flatLine N3 statements.

    Emits one flat line per holding, then one artificial cash-account
    line built from the footer, and cross-checks the computed total
    against the reported one.

    Parameters:
        csvfile -- the positions CSV (path or file, as accepted by
            readcsvtable_multiformat).
        fout -- writable file object receiving the N3 output.
        context -- optional context URI forwarded to n3header().
        hashing -- forwarded to stagvalue()/svalue() to obfuscate
            account ids.
    """
    n3header(fout,"$Id$"[1:-1],context)
    # h: header rows, t: holding rows, f: footer rows, version: which
    # htf_list template matched.
    h,t,f,version=readcsvtable_multiformat(csvfile,htf_list)
    # Extract from the footer the date in which the CSV file was extracted
    month={'January':'01','February':'02','March':'03','April':'04','May':'05',
        'June':'06','July':'07','August':'08','September':'09',
        'October':'10','November':'11','December':'12'}[f[2]['month']]
    statement_date=f[2]['year']+month+f[2]['day']
    statement_dtasof=(statement_date+t24(f[2]['hour'],f[2]['ampm'])+
        f[2]['min']+f[2]['sec'])
    accttype='INVESTMENT'
    table_total=0   # running sum of market values for the final check
    accounts=[]     # every ACCTID seen, for the synthetic cash account
    for l in t:
        print >>fout,"a3:flatLine [ ",
        units=float(l['UNITS'].replace(',',''))
        unitprice=float(l['UNITPRICE'])
        mktval=float(l['MKTVAL'])
        # Sanity check: market value should be units * price (within
        # rounding); mismatches are printed, not fatal.
        if abs(mktval-units*unitprice)>0.5:
            print mktval,units*unitprice
        table_total+=mktval
        if l['year']:
            # "Price As of" was a date: normalise to YYYYMMDD and use
            # the last second of that day.
            if len(l['year'])==4:
                dtasof=l['year']
            elif len(l['year'])==2:
                dtasof="20"+l['year']
            else:
                raise Exception("Bad year %s"%l['year'])
            if len(l['month'])==2:
                dtasof+=l['month']
            elif len(l['month'])==1:
                dtasof+="0"+l['month']
            else:
                raise Exception("Bad month %s"%l['month'])
            if len(l['day'])==2:
                dtasof+=l['day']
            elif len(l['day'])==1:
                dtasof+="0"+l['day']
            else:
                raise Exception("Bad day %s"%l['day'])
            dtasof+="235959" # last second of the day
        else:
            # Intraday time: combine with the statement date.
            dtasof= statement_date+t24(l['hour'],l['ampm'])+l['min']+'59'
        print >>fout,stagvalue('dtasof',dtasof),
        secname=l['SECNAME']
        ticker=l.get('TICKER',None) # Only version>0 has TICKER
        print >>fout,n3secid('CUSIP',l['SECID']),
        print >>fout,stagvalue('SECNAME',secname),
        if ticker:
            print >>fout,stagvalue('TICKER',ticker),
        acctid=l['ACCTID']
        accounts.append(acctid)
        print >>fout,stagvalue('ACCTID',acctid,hashing=hashing),
        print >>fout,stagvalue('DTASOF',statement_dtasof),
        print >>fout,stagvalue('CURDEF','USD'),
        print >>fout,stagvalue('ACCTTYPE','INVESTMENT'),
        print >>fout,stagvalue('UNITS',units),stagvalue('UNITPRICE',unitprice),
        print >>fout,stagvalue('MKTVAL',mktval),stagvalue('CURRENCY','USD'),
        print >>fout,"];"
    # Extract from the footer information on the cash account used for investment
    print >>fout,"a3:flatLine [ ",
    # surprisngly the information available is for one cash account even if
    # there are several investment accounts reported in the same CSV file
    # so we will generate an artifical account which cover cash in all accounts.
    # The synthetic id is the sorted concatenation of the (possibly
    # hashed) real account ids.
    accounts=[svalue('ACCTID',a,hashing=hashing)[0] for a in set(accounts)]
    accounts.sort()
    accounts="".join(accounts)
    print >>fout,stagvalue('ACCTID',accounts), # dont perform hashing
    print >>fout,stagvalue('DTASOF',statement_dtasof),
    cash = float(f[0]['UNITS'].replace(',',''))
    print >>fout,stagvalue('CURDEF','USD'),
    print >>fout,stagvalue('AVAILCASH',cash),
    print >>fout,stagvalue('MARGINBALANCE',0.),stagvalue('SHORTBALANCE',0.),
    print >>fout,"];"
    print >>fout,"."
    # Check total
    table_total+=cash
    total=float(f[1]['TOTAL'].replace(',',''))
    if abs(total-table_total)>0.5:
        print "Reported and computed totals dont match",total,table_total
| Python |
########################################################################
# Copyright (C) 2009 Ehud Ben-Reuven
# udi@benreuven.com
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation version 2.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
########################################################################
"""Convert a CSV file into a flat file using a set of tables of
regular expressions"""
import re
import csv
import logging
############ Set of regular expression macros used to build templates ##########
def eurdtzre(name):
    """regular expression for an European Date + Time + TZ
    For example: '15.03.2007 / 21:46 CET'
    '11.04.2007 / 11:49 CEST'
    """
    return ('(?P<%s_day>\d\d)\.(?P<%s_month>\d\d)\.(?P<%s_year>\d{4}) '+
        '/ (?P<%s_hours>\d\d):(?P<%s_min>\d\d) (?P<%s_TZ>[A-Z]{3,4})')%(
        name,name,name,name,name,name)
def strre(name):
    """regular expression for a string"""
    return '(?P<%s>.*)'%name
def currencyre(name):
    """regular expression for currency
    For example: 'USD'
    """
    return '(?P<%s>[A-Z]{3})'%name
def commanumre(name):
    """regular expression for a number with commas
    For example: '9,894.97', '0', '-120.00'
    """
    return '(?P<%s>((\-?\d{1,3}(,\d{3})*\.\d\d)|0))'%name
def commanumreint(name):
    """regular expression for a number with commas, cents optional
    For example: '9,894.97', '9,894', '0', '-120'
    """
    return '(?P<%s>((\-?\d{1,3}(,\d{3})*(\.\d\d)?)|0))'%name
def commaintre(name):
    """regular expression for an integer with commas
    For example: '9,894', '0', '-120'
    """
    return '(?P<%s>((\-?\d{1,3}(,\d{3})*)|0))'%name
def pvaluere(name,sig=2):
    """regular expression for a positive number with cents
    '0.00', '100.12', '9894.97'
    """
    return '(?P<%s>\d+\.\d{%d,2}|0)'%(name,sig)
def pvaluereint(name,sig=2):
    """regular expression for a positive number, cents optional
    '0.00', '100.12', '9894', '0'
    """
    return '(?P<%s>\d+(\.\d{%d,2})?|0)'%(name,sig)
def valuere(name,sig=2):
    """regular expression for a number with cents
    '0.00', '100.12', '9894.97'
    """
    return '(?P<%s>\-?\d+\.\d{%d,2}|0)'%(name,sig)
def valuereint(name,sig=2):
    """regular expression for a number, cents optional
    '0.00', '-100.12', '9894', '0'
    """
    return '(?P<%s>\-?\d+(\.\d{%d,2})?|0)'%(name,sig)
def valuere_optionalcomma(name,sig=2):
    """regular expression for a number with cents
    '0.00', '100.12', '9894.97', '9,894.97'
    """
    return '(?P<%s>\-?((\d{1,3}(,\d{3})*)|(\d+))\.\d{%d,2}|0)'%(name,sig)
def pintre(name):
    """regular expression for an int number
    """
    return '(?P<%s>\d+)'%(name)
def floatre(name):
    """regular expression for a float number
    """
    return '(?P<%s>\-?\d+(\.\d*)?)'%(name)
def eurdre24(name,sep='.'):
    """regular expression for an European date with a 2- or 4-digit year
    '20.02.2007', '20.02.07'
    """
    return '(?P<%s_day>\d\d)\%s(?P<%s_month>\d\d)\%s(?P<%s_year>\d\d(\d\d)?)'%(
        name,sep,name,sep,name)
def eurdre(name,sep='.',year_len=4):
    """regular expression for an European date
    '20.02.2007'
    """
    return '(?P<%s_day>\d\d)\%s(?P<%s_month>\d\d)\%s(?P<%s_year>\d{%d})'%(
        name,sep,name,sep,name,year_len)
def usdre(name,sep='/',year_len=2):
    """regular expression for a US date
    '12/29/06'
    """
    return '(?P<%s_month>\d\d)\%s(?P<%s_day>\d\d)\%s(?P<%s_year>\d{%d})'%(
        name,sep,name,sep,name,year_len)
def empty(name):
    """regular expression matching only the empty string, still binding
    the named group"""
    return '(?P<%s>)'%name
##################### Parse a CSV file with fixed structure ####################
def matchline(row,regexps):
    """Match the cells of a parsed CSV row against per-cell regexps.

    Each regexp is anchored (\\A...\\Z) and applied to the cell in the
    same position. Returns a dictionary of every named group collected
    across the cells, or None when any cell fails to match, a surplus
    row cell is non-empty, or a surplus regexp cannot match the empty
    string. Raises Exception when two groups of the same name bind
    different non-None values.
    """
    ncells = min(len(row), len(regexps))
    # Surplus row cells are tolerated only when they are empty/falsy.
    if any(row[ncells:]):
        return None
    # Surplus template cells must accept the empty string.
    for pattern in regexps[ncells:]:
        if not re.match('\\A' + pattern + '\\Z', ''):
            return None
    groups = {}
    for cell, pattern in zip(row[:ncells], regexps[:ncells]):
        matched = re.match('\\A' + pattern + '\\Z', cell)
        if matched is None:
            logging.info("%s does not match re %s" % (cell, pattern))
            return None
        # Merge named groups; a later non-None binding may fill in a
        # None, but two different non-None values are a template error.
        for name, val in matched.groupdict().items():
            previous = groups.get(name)
            if previous is None:
                groups[name] = val
            elif val is not None and val != previous:
                raise Exception("Conflicting match")
    return groups
def fixedtable(reader, regexps):
    """Read rows from a CSV reader and match them against a fixed template
    (one list of cell regexps per line).  Return a list of dictionaries,
    one per line, each holding all named groups found; None on mismatch."""
    if not regexps:
        return []
    matched = []
    for nline, row in enumerate(reader):
        objs = matchline(row, regexps[nline])
        if objs is None:
            logging.info('Mismatch at line %d:' % nline)
            logging.info('%s', 'ROW: %s' % row)
            logging.info('%s', 'REG: %s' % regexps[nline])
            return None
        matched.append(objs)
        # stop as soon as the whole template has been consumed
        if nline == len(regexps) - 1:
            return matched
    raise Exception('File too short')
def multilinetable(reader, line, footer=None, optional_footer=False):
    """Read a CSV table using ``line`` -- a list of regular expressions that
    should each match the corresponding cell of every table row.
    Repeat the process until EOF or until the rows match ``footer``, which is
    a fixed-table template (a list of per-line regexp lists; a None entry
    stands for an all-empty line).

    Returns a (table, footer_table) tuple of dictionary lists.
    Raises Exception when the file is too long, a footer line mismatches,
    or a required footer is missing or incomplete.
    """
    table = []
    footer_table = []
    footer_line = 0
    first_footer = True
    # Row last accepted as a match for an empty (None) footer entry.
    # BUG FIX: initialised here so the post-loop check below cannot hit an
    # unbound local when the reader yields no rows at all.
    last_empty_row = None
    for row in reader:
        if footer:
            foot_match = False
            while True:
                if footer_line >= len(footer):
                    raise Exception('File too long, line %d' % reader.line_num)
                # If a footer line is empty (None) then check that all cells
                # in row are empty and continue to the next row.
                # If any of the cells in row is not empty then skip the empty
                # footer line.
                last_empty_row = None
                if footer[footer_line] == None:
                    if not any(row):
                        last_empty_row = row
                        foot_match = True
                        break
                    footer_line += 1
                if footer_line >= len(footer):
                    footer_objs = None
                else:
                    footer_objs = matchline(row, footer[footer_line])
                if footer_objs != None:
                    footer_table.append(footer_objs)
                    footer_line += 1
                    foot_match = True
                    break
                else:
                    if first_footer:
                        footer_line = 0
                    # if we failed to match and we are on the first line of
                    # the footer (0), the footer simply hasn't started yet
                    if not footer_line:
                        break
                    # if we are inside the footer then there is one chance
                    # that we have a match: the current footer regex should
                    # have been applied to the previous line if the previous
                    # line was taken to be a match with None
                    if (last_empty_row and
                        matchline(last_empty_row, footer[footer_line])):
                        footer_line += 1
                        continue  # try matching the next footer line with current row
                    raise Exception('Footer line %d does not match line %d' % (
                        footer_line, reader.line_num))
            if foot_match:
                first_footer = False
                continue
            assert(footer_line == 0)  # Entire footer must match once the first line was matched
        objs = matchline(row, line)
        if objs == None:
            logging.info('%s', 'ROW: %s' % row)
            logging.info('%s', 'LIN: %s' % line)
            raise Exception("Failed to match")
        table.append(objs)
    if footer_line == 0 and optional_footer:
        pass
    else:
        if footer and footer_line < len(footer):
            # BUG FIX: compare the footer *position* (footer_line) against
            # len(footer)-1; the original compared the footer list itself,
            # which is always False on py2 and a TypeError on py3
            if footer_line < len(footer) - 1 or not last_empty_row:
                raise Exception('Only %d lines of footer where found' % footer_line)
    return (table, footer_table)
def readcsvtable_multiformat(csvfile, htf_list):
    """Try each (header, line, footer) template triple in ``htf_list`` against
    the CSV file and return ``(header, table, footer, version)`` for the first
    triple whose header matches.

    Raises Exception when ``htf_list`` is empty or no template matches.
    """
    for version, (header, line, footer) in enumerate(htf_list):
        logging.info("Attempting version %d" % version)
        h, t, f = readcsvtable(csvfile, header, line, footer)
        if h is not None:
            return h, t, f, version
    # Nothing matched.  BUG FIX: the original hit an unbound local ('h')
    # when htf_list was empty; also fixed the "Unknwon" message typo.
    raise Exception('Unknown file format')
def readcsvtable(fname, header=None, line=None, footer=None, optional_footer=False):
    """Read a CSV file with a fixed structure:
    it starts with fixed header lines, followed by zero or more lines each
    with the same structure, and then EOF or a footer.

    fname may be a path or an open file-like object (it is rewound first).
    Returns (header_dicts, table_dicts, footer_dicts); (None, None, None)
    when the header does not match.  Each dictionary contains all the named
    groups found in one line.
    """
    # BUG FIX: avoid mutable default arguments; the empty defaults mean
    # "no header" / "no per-row template" / "no footer"
    header = [] if header is None else header
    line = [] if line is None else line
    footer = [] if footer is None else footer
    opened_here = isinstance(fname, str)
    fp = open(fname, "rb") if opened_here else fname
    try:
        fp.seek(0)
        reader = csv.reader(fp)
        logging.info("Reading header")
        h = fixedtable(reader, header)
        if h is None:
            return (None, None, None)
        logging.info("Reading table & footer")
        t, f = multilinetable(reader, line, footer, optional_footer)
        return (h, t, f)
    finally:
        # BUG FIX: close the file only if we opened it here; the original
        # leaked the handle on every call with a path argument
        if opened_here:
            fp.close()
| Python |
########################################################################
# scanner - scanf with multiple line template and regex
# Copyright (C) 2007,8 Ehud Ben-Reuven
# udi@benreuven.com
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation version 2.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
########################################################################
""" Scan input text file using a template file.
Useful to extract information from flat, line oriented text files that differ in
well defined locations from a reference template file (e.g. a bank statement.)
The template defines
all the parts that are expected to change, extracts some of them and validates that
nothing else has changed. This is similar to the C scanf function that works on
a single line and is somewhat the reverse of embedded languages that allows you
to do small changes to a template file.
The goal of this routine is to simplfy the template as much as possible.
When creating a template start with an example of an input file, keep as-is the
parts of the file that are expected to remain fixed (e.g. headers and footers)
and replace the parts that are expected to change with a regular-expression.
For example, suppose the input is something like:
'"Junk" total on 9/25/06 is *** 12345 *** change 1.2%'
and the only part that changes is the 5 digit total value.
The template can be:
'"Junk" total on 9/25/06 is *** @"....."@ *** change 1.2%'
The expression is surronded by '@' which are removed and the remaining string is
evaluated as a Python expression which eats the two double-quotes.
(if you want to use '@' character for other usages escape it by adding '\' before it)
The result of the evalution should be a regular expression, in this case '.....',
which will match any 5 characters.
If you want to limit the template to 5 digits you can use the following:
'"Junk" total on 9/25/06 is *** @"\\d{5}"@ *** change 1.2%'
Where '\\' is needed because the string passes through one Python evaluation step, and '{5}' is part of Python's regular expression syntax.
You can have any number of Python expressions in the same line. For example:
'"@".*?"@ total on 9/25/06 is *** @"\\d{5}"@ *** change 1.2%'
Each expression can have in it named groups and the result of their match
is kept in a list of dictionaries which is returned by this function.
For example, the template:
'"Junk" total on 9/25/06 is *** @"(?P<mytotal>\\d+)"@ *** change 1.2%'
will give: [{'mytotal': '12345'}]
However, there is one limitation, the entire template line is first converted
into a single regular expression so it is not possible to use the same group
name in more than one expression in the same line, also the usage of numbered
group can be confusing and should be avoided. To help with this you can use
group names that begin with an underline ('_') these group names are temporary
and will not appear in the final list of dictonaries.
The namespace used when evaluting the Python expression comes pre-loaded with
several functions that can be used to simplfy the template.
For example, the template:
'@s('name')@ total on @date('when')@ is *** @d('mytotal')@ *** change @f('delta')@%'
will give: [{'name': 'Junk', 'when': '9/25/06', 'mytotal': 12345, 'delta': 1.2}]
Note that the value for 'mytotal' is an integer and for 'delta' is a float.
You can add your own functions by adding them to namespace variable.
For example:
namespace["mytotal"]=lambda : '(?P<mytotal>\\d{5})'
and then using it in a template
"Junk" total on 9/25/06 is *** @mytotal()@ *** change 1.2%'''
Will ensure that total is made of exactly 5 digits and otherwise the entire
match will fail. The resulting dictonary will show {'mytotal': '12345'}
Note that the value in the dictionary is a string. This can be solved by adding
another function
called 'post_mytotal' that will receive as input the string matched to 'mytotal':
Adding to the above example:
namespace["post_mytotal"]=lambda s: int(s)
will give the dictonary entry {'mytotal': 12345}
For more ideas, see below how namespace is built and the examples in the main part.
Up to now we talked about a single line from the template compared with a single
line from input. Both can have more lines: every line in template is converted
to one regular expression and is made to match the next input line.
Note that expressions surronded by '@' must start and end in the same template
line. Each succssful match returns a dictonary of group names and their values,
and the entire dictonary (if not empty) is appended to a list which is returned
by this function. As an aid, the entry 'nline' which keeps track of the line
number is added to each dictonary. The entire match process fails if any of
these matches fails, and the function returns None. For example, the input:
'''Line#,Voltage
1,12.2
2,23.4
3,4'''
can be scanned with the template:
'''Line#,Voltage
@d('_index')@,@f('value')@
@d('_index')@,@f('value')@
@d('_index')@,@f('value')@'''
which will give the following list of dictonaries:
[{'nline': 1, 'value': 12.2}, {'nline': 2, 'value': 23.4}, {'nline': 3, 'value': 4.0}]
Note that the first line (nline=0) is matched, but since it does not have any group names,
its dictonary was empty and therefore it did not appear in the final list.
Also note that the first column is validated to be an integer, but,
its value does not appear in the result since '_index' begins with an underline.
In the above example the input contains a table made from several similar lines.
In this case a single template line can be used by placing a '*' at the begining
of the template line. The same template will be used to match one line after the
other from the input until its end or until the match will fail,
in this case instead of failing the entire match process,
the next line from the template file will be attempted.
For example, the input:
'''Line#,Voltage
1,12.2
2,23.4
3,4
Total 39.6'''
can be scanned with the template:
'''Line#,Voltage
*@d('_index')@,@f('value')@
Total @f('total')@
'''
"""
import re
class scannerror(Exception):
    """ Failed to match template with input.

    i_nline / t_nline are the offending input / template line numbers
    (a value of -1 indicates End-Of-File); inline is the input line text
    and expression is the regular expression that failed to match.
    """
    def __init__(self, i_nline, t_nline, inline='', r=''):
        # BUG FIX: inline and r now default to '' because some raise sites
        # only know the line numbers (e.g. when the input ends prematurely);
        # previously those sites raised TypeError instead of scannerror.
        self.i_nline = i_nline
        self.t_nline = t_nline
        self.inline = inline
        self.expression = r
    def __str__(self):
        return 'Failed to match on line %d of input and line %d of template\n%s\n%s\n' % (
            self.i_nline, self.t_nline, self.inline, self.expression)
def scanner(template, input):
    """Scan ``input`` against ``template`` (see the module docstring).

    Both arguments may be strings or file-like objects.  Each template line
    is compiled into one regular expression; a leading '*' marks a line that
    may match any number of input lines, a leading '&' extends the preceding
    multi-line group.  Returns a list of dictionaries, one per input line
    that produced named groups (plus its 'nline' entry); raises scannerror
    on any mismatch.
    """
    # --- phase 1: compile every template line into a regex -----------------
    txtreg = []
    # Per-line modifier: NORMAL_LINE - match one line, MULTI_LINE - match
    # zero or more lines, EXT_LINE - continuation of a multi-line group.
    NORMAL_LINE, MULTI_LINE, EXT_LINE = 0, 1, 2
    txtmod = []
    try:
        template_iter = template.readline  # TODO: does not work as an iterator
    except AttributeError:
        template_iter = template.splitlines
    for t_nline, l in enumerate(template_iter()):
        txtreg_l = []
        if l.startswith('*'):
            l = l[1:]
            txtmod.append(MULTI_LINE)
        elif l.startswith('&'):
            l = l[1:]
            txtmod.append(EXT_LINE)
        else:
            txtmod.append(NORMAL_LINE)
        # Within each TEXT line, '@' is used to delimit python expressions
        lsegments = re.compile(r"\@(?<!\\)(.*?)\@(?<!\\)").split(l)
        for j, lsegment in enumerate(lsegments):
            if not lsegment: continue
            if j % 2:
                # Odd segments are Python expressions that yield a regex.
                # NOTE: eval of template text -- only use trusted templates.
                try:
                    txtreg_l.append(eval(lsegment, namespace))
                except SyntaxError as v:
                    v.lineno = t_nline
                    raise
            else:
                # Even segments are literal text: unescape '\@' then quote.
                lsegment = lsegment.replace("\\@", "@")
                lsegment = re.escape(lsegment)
                txtreg_l.append(lsegment)
        txtreg.append("^" + "".join(txtreg_l) + "$")
    # --- phase 2: match the compiled lines against the input ---------------
    # A MULTI_LINE regex is reused for as many input lines as it can; when
    # it stops matching, the next template line takes over.
    t_nline = 0
    output = []
    multimode = False
    t_multinline = -1
    try:
        input_iter = input.readlines()
    except AttributeError:
        input_iter = input.splitlines()
    for i_nline, inline in enumerate(input_iter):
        while True:
            # check if there are more input lines than template lines
            if t_nline >= len(txtreg):
                # BUG FIX: supply the line text and expression arguments
                # (the two-argument call raised TypeError, not scannerror)
                raise scannerror(i_nline, -1, inline, '')
            r = txtreg[t_nline]
            if multimode and txtmod[t_nline] == EXT_LINE:
                pass  # continuation line of an active multi-line segment
            elif txtmod[t_nline] == MULTI_LINE:
                multimode = True
                t_multinline = t_nline
            else:
                multimode = False
            found = re.match(r, inline)
            if not found:
                if multimode:
                    if t_nline == t_multinline:
                        # First line of a multi-line segment failed: skip the
                        # whole segment, including its EXT continuation lines.
                        multimode = False
                        while True:
                            t_nline += 1
                            if t_nline >= len(txtreg) or txtmod[t_nline] != EXT_LINE: break
                        continue
                    elif t_nline < len(txtreg) - 1 and txtmod[t_nline + 1] != EXT_LINE:
                        # At the last line of a multi-line segment: perhaps
                        # this row starts a fresh repetition of the segment.
                        r = txtreg[t_multinline]
                        found = re.match(r, inline)
                        if not found:
                            multimode = False
                            t_nline += 1
                            continue
                        else:
                            t_nline = t_multinline
                    else:
                        raise scannerror(i_nline, t_nline, inline, r)
                else:
                    raise scannerror(i_nline, t_nline, inline, r)
            linedict = found.groupdict()
            if linedict:
                # ignore names that start with '_'
                linedict = dict([(name, linedict[name]) for name in linedict if not name.startswith('_')])
                # For every name found, check if there is a post-processing function
                for name in linedict:
                    try:
                        post = namespace['post_' + name]
                        linedict[name] = post(linedict[name])
                    except KeyError:
                        pass
                linedict["nline"] = i_nline
                output.append(linedict)
            t_nline += 1
            if multimode and (t_nline >= len(txtreg) or txtmod[t_nline] != EXT_LINE):
                # end of the multi-line segment: rewind so it can repeat
                t_nline = t_multinline
            break
    # check if we have leftover in txtreg that should have been matched
    if t_nline < len(txtreg):
        if multimode and txtmod[t_nline] == EXT_LINE:
            # BUG FIX: the original raised with the undefined name 'i' here
            raise scannerror(-1, t_nline, 'EOF', txtreg[t_nline])
        for i in range(t_nline, len(txtreg) - 1):
            if txtmod[i] == NORMAL_LINE:
                # NOTE(review): reports txtreg[t_nline] (as before) although
                # txtreg[i] may be the more informative regex -- confirm
                raise scannerror(-1, i, 'EOF', txtreg[t_nline])
    return output
def f(n):
    """Template helper: named-group regex for a float; registers a
    post-processing hook that converts the matched text to float."""
    namespace['post_' + n] = float
    # optionally signed digits with an optional decimal part
    return r'(?P<%s>[+-]?\d+(\.\d*)?)' % n
def d(n):
    """Template helper: named-group regex for an unsigned integer;
    registers a post-processing hook that converts the match to int."""
    namespace['post_' + n] = int
    return r'(?P<%s>\d+)' % n
# Pre-loaded namespace for evaluating the @...@ expressions in templates.
namespace = {
    # quoted-or-bare string: matches "..." or a bare token; the named group
    # keeps only the body (the conditional group eats the quotes)
    "s": lambda n: '((?P<_%s>\\")|(?!\\"))(?P<%s>.*?)((?(_%s)\\"|(?<!\\")))' % (n, n, n),
    "d": d,
    "f": f,
    # US-style date such as 9/25/06
    "date": lambda n: '(?P<%s>(\d|1[0-2])/\d{2}/\d{2})' % n,
}
# Below is a list of examples of how to use the program.
if __name__=='__main__':
    # Multi-line ('*') template with a trailing extension ('&') line.
    input = r"""1,2
3,4
"""
    template = r"""*@'\d'@,@'\d'@
&"""
    try:
        print scanner(template,input)
    except scannerror, v:
        print v
    # Plain template: must equal the input exactly; no groups so result is [].
    input = 'Hello world'
    template = 'Hello world'
    print scanner(template,input)
    # '*' table template; the last input line '5,A' does not match '\d,\d',
    # so this is expected to raise scannerror.
    input = r'''
Hello world
1,2
3,4
5,A
'''.strip()
    template = r'''
Hello world
*@'\d'@,@'\d'@
'''.strip()
    try:
        print scanner(template,input)
    except scannerror, v:
        print v
    # Same input, template widened so the second cell accepts a letter too.
    template = r'''
Hello world
*@'\d'@,@'[a-zA-Z\d]'@
'''.strip()
    try:
        print scanner(template,input)
    except SyntaxError, v:
        print v
    # Custom namespace helpers: foo() captures 5 digits into group 'mytotal';
    # post_mytotal converts the matched string to an int.
    input = r'"Junk" total on 9/25/06 is *** 12345 *** change 1.2%'
    namespace["post_mytotal"] = lambda s: int(s)
    namespace["foo"] = lambda : '(?P<mytotal>\d{5})'
    template = r'''
@s('name')@ total on @date('when')@ is *** @foo()@ *** change @f('delta')@%
'''.strip()
    print scanner(template,input)
    # Table followed by a fixed footer line.
    input = r"""
Line#,Voltage
1,12.2
2,23.4
3,4
Total 39.6
""".strip()
    template = r"""
Line#,Voltage
*@d('_index')@,@f('value')@
Total @f('total')@
""".strip()
    print scanner(template,input)
    # Example of stripping the header out of a file and printing the rest
    template=r"""
Line#,Voltage
*@s('line')@
""".strip()
    def print_line(s):
        # post-processing hook used purely for its side effect (printing)
        print s
    namespace["post_line"]=print_line
    scanner(template,input)
| Python |
from rdflib import Namespace, URIRef, Literal, BNode
from rdflib.graph import Graph
from urllib import quote_plus
from httplib import HTTPConnection
from cStringIO import StringIO
import xml.dom
from simplejson import loads
# Frequently used RDF / RDFS / OWL vocabulary terms, bound once at module level.
owlNS = Namespace("http://www.w3.org/2002/07/owl#")
owlClass = owlNS["Class"]
owlObjectProperty = owlNS["ObjectProperty"]
owlDatatypeProperty = owlNS["DatatypeProperty"]
rdfNS = Namespace("http://www.w3.org/1999/02/22-rdf-syntax-ns#")
rdfProperty = rdfNS["Property"]
rdfType = rdfNS["type"]
rdfsNS = Namespace("http://www.w3.org/2000/01/rdf-schema#")
rdfsSubClassOf = rdfsNS["subClassOf"]
rdfsDomain = rdfsNS["domain"]
rdfsRange = rdfsNS["range"]
class SesameTransaction:
    """Builds an RDF transaction document in the Sesame HTTP
    <transaction> XML format from add/remove statements."""
    def __init__(self):
        # root <transaction> element; one child element is appended per action
        self.trans = xml.dom.getDOMImplementation().createDocument(None, "transaction", None)
    def add(self, statement):
        """Queue an (s, p, o) triple for addition."""
        self.__addAction("add", statement)
    def remove(self, statement):
        """Queue an (s, p, o) triple for removal."""
        self.__addAction("remove", statement)
    def toXML(self):
        """Serialize the queued transaction as an XML string."""
        return self.trans.toxml()
    def __addAction(self, action, statement):
        # Translate each rdflib term of the triple into the matching
        # transaction-XML element (literal / uri / bnode).
        element = self.trans.createElement(action)
        for item in statement:
            if isinstance(item, Literal):
                literal = self.trans.createElement("literal")
                if item.datatype is not None: literal.setAttribute("datatype", str(item.datatype))
                if item.language is not None: literal.setAttribute("xml:lang", str(item.language))
                literal.appendChild(self.trans.createTextNode(str(item)))
                element.appendChild(literal)
            elif isinstance(item, URIRef):
                uri = self.trans.createElement("uri")
                uri.appendChild(self.trans.createTextNode(str(item)))
                element.appendChild(uri)
            elif isinstance(item, BNode):
                bnode = self.trans.createElement("bnode")
                bnode.appendChild(self.trans.createTextNode(str(item)))
                element.appendChild(bnode)
            else:
                # BUG FIX: use repr() -- concatenating a non-string raised
                # TypeError here and masked the actual problem
                raise Exception("Unknown element: " + repr(item))
        self.trans.childNodes[0].appendChild(element)
class SesameConnection:
    """Thin HTTP client for an OpenRDF Sesame server."""
    def __init__(self, host, repository=None):
        self.host = host
        self.repository = repository
        self.sparql_prefix=""
    def addnamespace(self, id, ns):
        """Register a PREFIX declaration prepended to every query."""
        self.sparql_prefix+='PREFIX %s:<%s>\n' % (id,ns)
    def repositories(self):
        """List the repositories available on the server."""
        return self.__getsparql__('repositories')
    def use_repository(self, r):
        """Select the repository used by subsequent queries/updates."""
        self.repository = r
    def __getsparql__(self, path):
        """GET a server path and return the parsed JSON result bindings;
        on a parse failure return [{'error': <raw response>}].

        BUG FIX: this method was missing entirely, so repositories()
        raised AttributeError; it mirrors pysesame's connection class."""
        data = self.__request__("GET", '/openrdf-sesame/' + path, None,
                                {"accept": 'application/sparql-results+json'})
        try:
            return loads(data)['results']['bindings']
        except (ValueError, KeyError, TypeError):
            return [{'error': data}]
    def __request__(self, method, path, data, headers):
        """Issue one HTTP request and return the body; raise unless the
        response status is 200 or 204."""
        conn = HTTPConnection(self.host)
        conn.request(method, path, data, headers)
        response = conn.getresponse()
        if response.status != 200 and response.status != 204:
            raise Exception("Sessame connection error " + str(response.status) + " " + response.reason)
        response = response.read()
        conn.close()
        return response
    def query(self, query, graph):
        """Run a CONSTRUCT query and parse the RDF/XML result into graph."""
        path = '/openrdf-sesame/repositories/' + self.repository + '?query=' + quote_plus(self.sparql_prefix + query)
        data = self.__request__("GET", path, None, {"accept":"application/rdf+xml"})
        graph.parse(StringIO(data))
        return graph
    def querylist(self, query):
        """Run a SELECT query and return the JSON result bindings."""
        path = '/openrdf-sesame/repositories/' + self.repository + '?query=' + quote_plus(self.sparql_prefix + query)
        data = self.__request__("GET", path, None, {"accept":'application/sparql-results+json'})
        try:
            result=loads(data)['results']['bindings']
            return result
        except Exception:
            # narrowed from a bare "except:"; keeps best-effort error
            # reporting while letting KeyboardInterrupt/SystemExit propagate
            return [{'error':data}]
    def update(self, add=None, remove=None):
        """POST an RDF transaction adding/removing the given statements."""
        path = '/openrdf-sesame/repositories/' + self.repository + "/statements"
        trans = SesameTransaction()
        if remove is not None:
            for statement in remove: trans.remove(statement)
        if add is not None:
            for statement in add: trans.add(statement)
        data = self.__request__("POST", path, trans.toXML(), {"content-type":"application/x-rdftransaction"})
class OWLOntology:
    """
    This class loads the mappings from simple property names
    to OWL property URIs.
    """
    def __init__(self, sesameConnection):
        # query for all OWL classes and properties:
        self._ontGraph = Graph()
        sesameConnection.query(
            """construct {
            ?c rdf:type owl:Class .
            ?c rdfs:subClassOf ?sc .
            ?p rdfs:domain ?c .
            ?p rdfs:range ?d .
            ?p rdf:type ?pt .
            ?p rdfs:subPropertyOf ?sp .
            } where
            {
            ?c rdf:type owl:Class .
            OPTIONAL {
            ?c rdfs:subClassOf ?sc .
            }
            OPTIONAL {
            ?p rdfs:domain ?c .
            ?p rdfs:range ?d .
            ?p rdf:type ?pt .
            }
            OPTIONAL {
            ?p rdfs:subPropertyOf ?sp .
            }
            }""", self._ontGraph)
        # map type properties to simplified names:
        # propertyMaps[class URI] -> {simple name: property URI}
        self.propertyMaps = {}
        for ontClass in self._ontGraph.subjects(rdfType, owlClass):
            propertyMap = self.propertyMaps[ontClass] = {}
            # forward properties: those whose rdfs:domain is this class
            for property in self._ontGraph.subjects(rdfsDomain, ontClass):
                propertyName = self.getSimplifiedName(property)
                propertyMap[propertyName] = property
            # reverse properties (rdfs:range is this class) get an "r_" prefix
            for property in self._ontGraph.subjects(rdfsRange, ontClass):
                propertyName = "r_" + self.getSimplifiedName(property)
                propertyMap[propertyName] = property
        # recursivley copy property mappings across the class hierarchy:
        # superclass entries are applied first so the class's own entries win
        def copySuperclassProperties(ontClass, propertyMap):
            # NOTE(review): assumes every superclass reachable here is itself
            # declared as an owl:Class in the graph; a foreign superclass
            # would raise KeyError on propertyMaps -- confirm against data
            for superclass in self._ontGraph.objects(ontClass, rdfsSubClassOf):
                copySuperclassProperties(superclass, propertyMap)
            propertyMap.update(self.propertyMaps[ontClass])
        for ontClass in self._ontGraph.subjects(rdfType, owlClass):
            copySuperclassProperties(ontClass, self.propertyMaps[ontClass])
    def getSimplifiedName(self, uri):
        # keep only the fragment after the last '#' (or, failing that, '/')
        if "#" in uri: return uri[uri.rfind("#") + 1:]
        return uri[uri.rfind("/") + 1:]
class RDFObjectGraphFactory:
    """
    A factory for RDFObjects: binds one connection (with the standard
    namespaces registered) and one loaded ontology.
    """
    def __init__(self, connection):
        self.connection = connection
        # standard namespaces every generated query may reference
        for prefix, uri in (
                ("xsd", "http://www.w3.org/2001/XMLSchema#"),
                ("rdf", "http://www.w3.org/1999/02/22-rdf-syntax-ns#"),
                ("rdfs", "http://www.w3.org/2000/01/rdf-schema#"),
                ("owl", "http://www.w3.org/2002/07/owl#")):
            self.connection.addnamespace(prefix, uri)
        self.ontology = OWLOntology(connection)
    def createGraph(self):
        """Return a fresh RDFObjectGraph bound to this factory's connection."""
        return RDFObjectGraph(self.connection, self.ontology)
class RDFObjectGraph:
    """
    The RDFObjectGraph caches remote triples locally for populating
    RDFObject values, and stages additions/removals until commit().
    """
    def __init__(self, connection, ontology):
        self._connection = connection
        self._ontology = ontology
        self._rdfObjects = {}
        self._graph = Graph()      # triples as last seen on the server
        self._added = Graph()      # staged additions
        self._removed = Graph()    # staged removals
    def get(self, uri):
        """
        Gets an RDFObject for the specified URI (cached per graph).
        """
        if uri not in self._rdfObjects:
            self._load(uri)
            self._rdfObjects[uri] = RDFObject(uri, self)
        return self._rdfObjects[uri]
    def _load(self, uri):
        """
        Ensure the data for a uri has been pulled into the local graph.
        """
        if uri in self._rdfObjects:
            return
        ref = "<" + uri + ">"
        self._connection.query(
            "construct { " + ref + " ?p ?o . " +
            "?rs ?rp " + ref + " .} where { " +
            "OPTIONAL { " + ref + " ?p ?o } " +
            "OPTIONAL { ?rs ?rp " + ref + " } }", self._graph)
    def _subjects(self, prop, uri):
        """
        Yield every subject of (prop, uri), honoring staged edits.
        """
        for s, p, o in self._graph.triples((None, prop, uri)):
            if (s, p, o) not in self._removed:
                yield s
        for s, p, o in self._added.triples((None, prop, uri)):
            yield s
    def _objects(self, uri, prop):
        """
        Yield every object of (uri, prop), honoring staged edits.
        """
        for s, p, o in self._graph.triples((uri, prop, None)):
            if (s, p, o) not in self._removed:
                yield o
        for s, p, o in self._added.triples((uri, prop, None)):
            yield o
    def _predicates(self, subject=None, object=None):
        """
        Return the set of predicates seen for a subject or object URI.
        """
        preds = set()
        for s, p, o in self._graph.triples((subject, None, object)):
            if (s, p, o) not in self._removed:
                preds.add(p)
        for s, p, o in self._added.triples((subject, None, object)):
            preds.add(p)
        return preds
    def _setSubjects(self, values, prop, uri):
        """
        Replace the full subject set for (prop, uri) with values.
        """
        current = set(self._graph.subjects(prop, uri))
        self._stage(set(values), current, lambda v: (v, prop, uri))
    def _setObjects(self, uri, prop, values):
        """
        Replace the full object set for (uri, prop) with values.
        """
        current = set(self._graph.objects(uri, prop))
        self._stage(set(values), current, lambda v: (uri, prop, v))
    def _stage(self, wanted, current, as_triple):
        # Shared staging logic: triples no longer wanted are moved to the
        # removal set; new ones to the addition set (undoing any opposite
        # staging first).
        for value in current - wanted:
            triple = as_triple(value)
            self._added.remove(triple)
            self._removed.add(triple)
        for value in wanted - current:
            triple = as_triple(value)
            self._removed.remove(triple)
            self._added.add(triple)
    def commit(self):
        """
        Commits staged changes to the remote graph and flushes local caches.
        """
        self._connection.update(add=self._added, remove=self._removed)
        self._rdfObjects = {}
        self._graph = Graph()
        self._added = Graph()
        self._removed = Graph()
class RDFObject:
    """
    The RDFObject wraps an RDF URI and automatically retrieves property values
    as they are referenced as object attributes.
    """
    def __init__(self, uri, objectGraph):
        # Assign through __dict__ to bypass this class's own __setattr__,
        # which would otherwise try to stage RDF triples for these fields.
        self.__dict__["uri"] = uri
        self.__dict__["_objectGraph"] = objectGraph
    def __repr__(self):
        return "<RDFObject " + self.uri + ">"
    def __str__(self):
        return self.uri
    def __getattr__(self, name):
        # Called only for attributes not found normally: the name is resolved
        # as an ontology property.  A leading "r_" means reverse direction
        # (subjects pointing at this URI).
        self._objectGraph._load(self.uri)
        prop = self._getProp(name)
        if name.startswith("r_"):
            values = self._objectGraph._subjects(prop, self.uri)
        else:
            values = self._objectGraph._objects(self.uri, prop)
        results = self._wrapResults(values)
        return results
    def __setitem__(self, name, values):
        # item assignment delegates to the attribute path below
        self.__setattr__(name,values)
    def __setattr__(self, name, values):
        # Intercepts ALL attribute assignment: values must be an iterable of
        # RDFObjects / rdflib terms / plain literals, and replaces the full
        # value set of the property in the object graph.
        self._objectGraph._load(self.uri)
        unwrappedValues = []
        for value in values:
            # unwrap rdfobjects:
            if isinstance(value, RDFObject):
                unwrappedValues.append(value.uri)
            # pass through rdflib objects:
            elif isinstance(value, URIRef) or isinstance(value, BNode) or isinstance(value, Literal):
                unwrappedValues.append(value)
            # wrap literals:
            else:
                unwrappedValues.append(Literal(value))
        # look for a property mapping for this name:
        prop = self._getProp(name)
        if name.startswith("r_"):
            self._objectGraph._setSubjects(unwrappedValues, prop, self.uri)
        else:
            self._objectGraph._setObjects(self.uri, prop, unwrappedValues)
    def _getProp(self, name):
        # Resolve a simplified property name via the ontology maps of this
        # resource's rdf:type(s); "type" always maps to rdf:type.
        if name == "type": return rdfType
        for type in self._objectGraph._objects(self.uri, rdfType):
            propertyMap = self._objectGraph._ontology.propertyMaps[type]
            if name in propertyMap: return propertyMap[name]
        raise AttributeError("Unknown property '" + name + "'")
    def __getitem__(self, key):
        # Dictionary-style access: resolves by each predicate's simplified
        # name instead of the ontology map (works for properties without a
        # declared domain/range).
        self._objectGraph._load(self.uri)
        # iterate over predicates and look for a matching name:
        reverse = key.startswith("r_")
        if reverse:
            preds = self._objectGraph._predicates(object=self.uri)
            name = key[2:]
        else:
            preds = self._objectGraph._predicates(subject=self.uri)
            name = key
        for pred in preds:
            if self._objectGraph._ontology.getSimplifiedName(pred) == name:
                if reverse:
                    values = self._objectGraph._subjects(pred, self.uri)
                else:
                    values = self._objectGraph._objects(self.uri, pred)
                return self._wrapResults(values)
        raise KeyError("Property '" + key + "' not found")
    def _wrapResults(self, results):
        # Literals are returned as-is; URIs/BNodes become (cached) RDFObjects.
        ret = []
        for result in results:
            if isinstance(result, Literal): ret.append(result)
            else: ret.append(self._objectGraph.get(result))
        return ret
if __name__ == "__main__":
    # Demo: browse and edit the 'semprog' film data set on a local Sesame server.
    sc = SesameConnection("localhost:8080", "semprog")
    factory = RDFObjectGraphFactory(sc)
    objectGraph = factory.createGraph()
    filmNs = Namespace("http://www.semprog.com/film#")
    # attribute access lazily loads property values from the store
    bladerunner = objectGraph.get(filmNs["blade_runner"])
    harrisonford = objectGraph.get(filmNs["harrison_ford"])
    print bladerunner.type
    print bladerunner.name[0]
    # property chains traverse the graph; "r_" prefixes walk edges backwards
    print bladerunner.starring[0].has_actor[0].name[0]
    print bladerunner.starring[0].has_actor[0].r_has_actor[0].r_starring
    print harrisonford.name[0]
    print harrisonford.r_has_actor[0].r_starring
    # dictionary-style access resolves by simplified predicate name
    print bladerunner["name"][0]
    print bladerunner["starring"][0]["has_actor"][0]["name"][0]
    # append a value locally, then push the change with commit()
    names = bladerunner.name
    names.append("Do Androids Dream of Electric Sheep?")
    bladerunner.name = names
    print bladerunner.name
    objectGraph.commit()
    print bladerunner.name
    # overwrite the whole value set and commit again
    bladerunner.name = ["Blade Runner"]
    objectGraph.commit()
    print bladerunner.name
    # create new resources by assigning type/name to fresh URIs
    raiders = objectGraph.get(filmNs["raiders_of_the_lost_ark"])
    raiders.type = [filmNs["Film"]]
    raiders.name = ["Raiders of the Lost Ark"]
    perf2 = objectGraph.get(filmNs["perf2"])
    perf2.type = [filmNs["Performance"]]
    indy = objectGraph.get(filmNs["indy"])
    indy.type = [filmNs["Role"]]
    indy.name = ["Indiana Jones"]
    # reverse assignment: make both films point at this performance
    perf2.r_starring = [raiders, bladerunner]
    perf2.has_actor = [harrisonford]
    perf2.has_role = [indy]
    objectGraph.commit()
    print indy.name
    print raiders.name
    perf2.r_starring = [raiders]
    objectGraph.commit()
    print perf2.r_starring
    print raiders.starring[0].has_actor[0].uri
    print harrisonford.r_has_actor
| Python |
import urllib2
from urllib import quote_plus
from simplejson import loads
from httplib import HTTPConnection
import urlparse
class connection:
    """Minimal client for the OpenRDF Sesame 2 HTTP protocol."""
    def __init__(self,url):
        self.baseurl=url
        self.sparql_prefix=""
        self.host=urlparse.urlparse(url).netloc
        # BUG FIX: set by use_repository(); initialised here so using the
        # connection before selecting a repository fails predictably
        # instead of with AttributeError
        self.repository=None
    def addnamespace(self,id,ns):
        """Register a PREFIX declaration prepended to every query."""
        self.sparql_prefix+='PREFIX %s:<%s>\n' % (id,ns)
    def __getsparql__(self,method):
        """GET baseurl+method and return the parsed JSON result bindings;
        on any failure to parse, return [{'error': <raw response>}]."""
        req=urllib2.Request(self.baseurl+method,
            headers={'Accept':'application/sparql-results+json'})
        data=urllib2.urlopen(req)
        data=data.read()
        try:
            result=loads(data)['results']['bindings']
            return result
        except Exception:
            # narrowed from a bare "except:" so KeyboardInterrupt and
            # SystemExit propagate; parse failures still report best-effort
            return [{'error':data}]
    def repositories(self):
        """List the repositories available on the server."""
        return self.__getsparql__('repositories')
    def use_repository(self,r):
        """Select the repository used by subsequent queries/updates."""
        self.repository=r
    def query(self,q):
        """Run a SELECT query against the current repository and return
        the JSON result bindings."""
        q='repositories/'+self.repository+'?query='+quote_plus(self.sparql_prefix+q)
        return self.__getsparql__(q)
    def construct_query(self,q):
        """Run a query and return the raw (unparsed) response body."""
        q='repositories/'+self.repository+'?query='+quote_plus(self.sparql_prefix+q)
        data=urllib2.urlopen(urllib2.Request(self.baseurl+q,headers={'Accept':'application/sparql-results+json'})).read()
        return data
    def postdata(self,data,context=None):
        """POST RDF/XML statements into the repository, optionally scoped
        to a named context graph."""
        host=self.baseurl+'/repositories/'+self.repository+'/statements'
        if context:
            host += '?context=' + quote_plus(context)
        res=urllib2.urlopen(urllib2.Request(host,data,{'Content-Type':'application/rdf+xml;charset=UTF-8'})).read()
        return res
    def __request__(self, method, path, data, headers):
        """Issue one HTTP request; raise unless the status is 200 or 204."""
        conn = HTTPConnection(self.host)
        conn.request(method, path, data, headers)
        response = conn.getresponse()
        if response.status != 200 and response.status != 204:
            raise Exception("Sessame connection error " +
                str(response.status) + " " + response.reason)
        response = response.read()
        conn.close()
        return response
    def putdata(self,data,context=None):
        """PUT (replace) repository statements with RDF/XML data,
        optionally scoped to a named context graph."""
        # NOTE(review): the full URL is passed as the request *path*; many
        # servers accept an absolute request URI, but verify against the
        # target Sesame version
        host=self.baseurl+'/repositories/'+self.repository+'/statements'
        if context:
            host += '?context=' + quote_plus(context)
        return self.__request__("PUT", host, data,
            {'Content-Type':
             'application/rdf+xml;charset=UTF-8'})
if __name__=='__main__':
    # Example: find everyone who co-starred in a film with John Malkovich.
    c=connection('http://localhost:8080/openrdf-sesame/')
    c.use_repository('Movies')
    c.addnamespace('fb','http://rdf.freebase.com/ns/')
    c.addnamespace('dc','http://purl.org/dc/elements/1.1/')
    res=c.query("""SELECT ?costar ?fn WHERE {?film fb:film.film.performances ?p1 .
    ?film dc:title ?fn .
    ?p1 fb:film.performance.actor ?a1 .
    ?a1 dc:title "John Malkovich".
    ?film fb:film.film.performances ?p2 .
    ?p2 fb:film.performance.actor ?a2 .
    ?a2 dc:title ?costar .}""")
    # each binding row is a dict of variable name -> value description
    for r in res:
        print r
# -*- coding: utf-8 -*-
###########################################################################
## Python code generated with wxFormBuilder (version Sep 8 2010)
## http://www.wxformbuilder.org/
##
## PLEASE DO "NOT" EDIT THIS FILE!
###########################################################################
import wx
# wxFormBuilder-generated window/control IDs.  Note they are (unconventionally)
# attached to the wx module itself rather than kept as module-level constants.
wx.ID_Window = 1000
wx.ID_Window_StatusBar = 1001
wx.ID_Window_MenuBar = 1002
wx.ID_Window_Quit = 1003
wx.ID_Window_SplitterWindow_LeftPanel = 1004
###########################################################################
## Class Window
###########################################################################
class Window ( wx.Frame ):
    """Main frame generated by wxFormBuilder.

    Layout: a status bar, a File menu with a Quit item, and a
    horizontal splitter whose left pane holds a grey canvas panel and
    whose right pane holds a vertical column of buttons. Do not edit
    by hand beyond comments — the generator overwrites this class.
    """
    def __init__( self, parent ):
        wx.Frame.__init__ ( self, parent, id = wx.ID_Window, title = u"Klein", pos = wx.DefaultPosition, size = wx.Size( 705,238 ), style = wx.DEFAULT_FRAME_STYLE|wx.TAB_TRAVERSAL )
        self.SetSizeHintsSz( wx.DefaultSize, wx.DefaultSize )
        # status bar and menu bar
        self.mStatusBar = self.CreateStatusBar( 1, wx.ST_SIZEGRIP, wx.ID_Window_StatusBar )
        self.mMenuBar = wx.MenuBar( 0 )
        self.mFile = wx.Menu()
        self.mQuit = wx.MenuItem( self.mFile, wx.ID_Window_Quit, u"Quit", wx.EmptyString, wx.ITEM_NORMAL )
        self.mFile.AppendItem( self.mQuit )
        self.mMenuBar.Append( self.mFile, u"File" )
        self.SetMenuBar( self.mMenuBar )
        # splitter: left = canvas panel, right = button column
        mSizer = wx.BoxSizer( wx.HORIZONTAL )
        self.mSplitterWindow = wx.SplitterWindow( self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.SP_3D )
        self.mSplitterWindow.Bind( wx.EVT_IDLE, self.mSplitterWindowOnIdle )
        self.mLeftPanel = wx.Panel( self.mSplitterWindow, wx.ID_Window_SplitterWindow_LeftPanel, wx.DefaultPosition, wx.DefaultSize, 0 )
        mRightSizer = wx.BoxSizer( wx.VERTICAL )
        self.mCanvasPanel = wx.Panel( self.mLeftPanel, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, 0 )
        self.mCanvasPanel.SetBackgroundColour( wx.Colour( 128, 128, 128 ) )
        mRightSizer.Add( self.mCanvasPanel, 1, wx.EXPAND |wx.ALL, 5 )
        self.mLeftPanel.SetSizer( mRightSizer )
        self.mLeftPanel.Layout()
        mRightSizer.Fit( self.mLeftPanel )
        self.mRightPanel = wx.Panel( self.mSplitterWindow, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.VSCROLL )
        mLeftSizer = wx.BoxSizer( wx.VERTICAL )
        # generated column of placeholder buttons (m_button38..m_button58)
        self.m_button38 = wx.Button( self.mRightPanel, wx.ID_ANY, u"1", wx.DefaultPosition, wx.DefaultSize, 0 )
        mLeftSizer.Add( self.m_button38, 0, wx.ALL, 5 )
        self.m_button39 = wx.Button( self.mRightPanel, wx.ID_ANY, u"MyButton", wx.DefaultPosition, wx.DefaultSize, 0 )
        mLeftSizer.Add( self.m_button39, 0, wx.ALL, 5 )
        self.m_button40 = wx.Button( self.mRightPanel, wx.ID_ANY, u"MyButton", wx.DefaultPosition, wx.DefaultSize, 0 )
        mLeftSizer.Add( self.m_button40, 0, wx.ALL, 5 )
        self.m_button41 = wx.Button( self.mRightPanel, wx.ID_ANY, u"MyButton", wx.DefaultPosition, wx.DefaultSize, 0 )
        mLeftSizer.Add( self.m_button41, 0, wx.ALL, 5 )
        self.m_button42 = wx.Button( self.mRightPanel, wx.ID_ANY, u"MyButton", wx.DefaultPosition, wx.DefaultSize, 0 )
        mLeftSizer.Add( self.m_button42, 0, wx.ALL, 5 )
        self.m_button43 = wx.Button( self.mRightPanel, wx.ID_ANY, u"MyButton", wx.DefaultPosition, wx.DefaultSize, 0 )
        mLeftSizer.Add( self.m_button43, 0, wx.ALL, 5 )
        self.m_button44 = wx.Button( self.mRightPanel, wx.ID_ANY, u"MyButton", wx.DefaultPosition, wx.DefaultSize, 0 )
        mLeftSizer.Add( self.m_button44, 0, wx.ALL, 5 )
        self.m_button45 = wx.Button( self.mRightPanel, wx.ID_ANY, u"MyButton", wx.DefaultPosition, wx.DefaultSize, 0 )
        mLeftSizer.Add( self.m_button45, 0, wx.ALL, 5 )
        self.m_button46 = wx.Button( self.mRightPanel, wx.ID_ANY, u"MyButton", wx.DefaultPosition, wx.DefaultSize, 0 )
        mLeftSizer.Add( self.m_button46, 0, wx.ALL, 5 )
        self.m_button47 = wx.Button( self.mRightPanel, wx.ID_ANY, u"MyButton", wx.DefaultPosition, wx.DefaultSize, 0 )
        mLeftSizer.Add( self.m_button47, 0, wx.ALL, 5 )
        self.m_button48 = wx.Button( self.mRightPanel, wx.ID_ANY, u"MyButton", wx.DefaultPosition, wx.DefaultSize, 0 )
        mLeftSizer.Add( self.m_button48, 0, wx.ALL, 5 )
        self.m_button49 = wx.Button( self.mRightPanel, wx.ID_ANY, u"MyButton", wx.DefaultPosition, wx.DefaultSize, 0 )
        mLeftSizer.Add( self.m_button49, 0, wx.ALL, 5 )
        self.m_button50 = wx.Button( self.mRightPanel, wx.ID_ANY, u"MyButton", wx.DefaultPosition, wx.DefaultSize, 0 )
        mLeftSizer.Add( self.m_button50, 0, wx.ALL, 5 )
        self.m_button51 = wx.Button( self.mRightPanel, wx.ID_ANY, u"MyButton", wx.DefaultPosition, wx.DefaultSize, 0 )
        mLeftSizer.Add( self.m_button51, 0, wx.ALL, 5 )
        self.m_button52 = wx.Button( self.mRightPanel, wx.ID_ANY, u"MyButton", wx.DefaultPosition, wx.DefaultSize, 0 )
        mLeftSizer.Add( self.m_button52, 0, wx.ALL, 5 )
        self.m_button53 = wx.Button( self.mRightPanel, wx.ID_ANY, u"MyButton", wx.DefaultPosition, wx.DefaultSize, 0 )
        mLeftSizer.Add( self.m_button53, 0, wx.ALL, 5 )
        self.m_button54 = wx.Button( self.mRightPanel, wx.ID_ANY, u"MyButton", wx.DefaultPosition, wx.DefaultSize, 0 )
        mLeftSizer.Add( self.m_button54, 0, wx.ALL, 5 )
        self.m_button55 = wx.Button( self.mRightPanel, wx.ID_ANY, u"MyButton", wx.DefaultPosition, wx.DefaultSize, 0 )
        mLeftSizer.Add( self.m_button55, 0, wx.ALL, 5 )
        self.m_button56 = wx.Button( self.mRightPanel, wx.ID_ANY, u"MyButton", wx.DefaultPosition, wx.DefaultSize, 0 )
        mLeftSizer.Add( self.m_button56, 0, wx.ALL, 5 )
        self.m_button57 = wx.Button( self.mRightPanel, wx.ID_ANY, u"MyButton", wx.DefaultPosition, wx.DefaultSize, 0 )
        mLeftSizer.Add( self.m_button57, 0, wx.ALL, 5 )
        self.m_button58 = wx.Button( self.mRightPanel, wx.ID_ANY, u"-1", wx.DefaultPosition, wx.DefaultSize, 0 )
        mLeftSizer.Add( self.m_button58, 0, wx.ALL, 5 )
        self.mRightPanel.SetSizer( mLeftSizer )
        self.mRightPanel.Layout()
        mLeftSizer.Fit( self.mRightPanel )
        self.mSplitterWindow.SplitVertically( self.mLeftPanel, self.mRightPanel, 486 )
        mSizer.Add( self.mSplitterWindow, 1, wx.EXPAND, 5 )
        self.SetSizer( mSizer )
        self.Layout()
        self.Centre( wx.BOTH )
    def __del__( self ):
        """Generated destructor hook (intentionally empty)."""
        pass
    def mSplitterWindowOnIdle( self, event ):
        """One-shot idle handler: pin the sash at 486px once the
        splitter is realized, then detach itself."""
        self.mSplitterWindow.SetSashPosition( 486 )
        self.mSplitterWindow.Unbind( wx.EVT_IDLE )
if __name__ == '__main__':
    # Guard the GUI startup so that importing this generated module
    # does not open a window as an import side effect; running the
    # file as a script behaves exactly as before.
    app = wx.App()
    win = Window(None)
    win.Show(True)
    app.MainLoop()
| Python |
#!/usr/bin/python
# -*- coding: UTF-8 -*-
# vim:set shiftwidth=4 tabstop=4 expandtab textwidth=79:
# Author: Huang Jiahua <jhuangjiahua@gmail.com>
# License: LGPLv3+
# Last modified:
'''文档格式转换
'''
import os
import subprocess
def doc2html(docfile):
    '''Convert an MS Word .doc file to HTML using the external
    wvHtml tool (from the "wv" package).

    Returns the path of the generated HTML file, located in a freshly
    created per-call temporary directory.
    '''
    import tempfile  # local import keeps the module import block unchanged
    # tempfile.mkdtemp replaces the insecure os.tmpnam() (predictable
    # name, race window; removed in Python 3): the directory is
    # created atomically with mode 0700.
    # NOTE(review): os.getlogin() can raise without a controlling
    # terminal; kept for parity with the original naming scheme.
    outdir = tempfile.mkdtemp(prefix='gwrite-%s-' % os.getlogin())
    html = 'gwrite.html'
    subprocess.Popen(['wvHtml', '--targetdir=%s' % outdir, docfile, html]).wait()
    return outdir + '/' + html
| Python |
#!/usr/bin/python
# -*- coding: UTF-8 -*-
'''WebkitLinkView
@author: U{Jiahua Huang <jhuangjiahua@gmail.com>}
@license: LGPLv3+
'''
import gobject
import gtk
import webkit
import re
def proc(html):
    """Decorate each tag-opening in *html* with onDblClick/onMouseOver
    handlers so the preview pane can surface link targets.

    >>> proc(' <a href="#3.2.1">3.2.1 heading</a>')
    '<a href="#3.2.1" onDblClick="window.location.href=\'+\'+this.href;" onMouseOver="document.title=this.href;" > 3.2.1 heading</a>'
    """
    pattern = re.compile('( *)(.*?)(>)(.*)')

    def rewrite(match):
        # leading indent is moved after the '>', as in the doctest
        indent, head, close, tail = match.group(1, 2, 3, 4)
        return (head
                + ' onDblClick="window.location.href=\'+\'+this.href;"'
                + ' onMouseOver="document.title=this.href;" '
                + close + indent + tail)

    return pattern.sub(rewrite, html)
def stastr(stri):
    '''Escape *stri* for embedding inside a quoted JavaScript string:
    backslashes, double quotes, single quotes and newlines are
    backslash-escaped (in that order).
    '''
    for raw, escaped in (('\\', '\\\\'), ('"', '\\"'), ("'", "\\'"), ('\n', '\\n')):
        stri = stri.replace(raw, escaped)
    return stri
class LinkTextView(webkit.WebView):
    """A non-focusable WebKit view used as a clickable link list.

    Emits the custom "url-clicked" signal with (href, type) when the
    user activates an in-page anchor; all navigation is cancelled.
    """
    #__gtype_name__ = 'LinkTextView'
    __gsignals__ = {
        'url-clicked': (gobject.SIGNAL_RUN_LAST, None, (str, str)), # href, type
    }
    def __init__(self):
        webkit.WebView.__init__(self)
        self.connect("navigation-requested", self.on_navigation_requested)
        #self.connect_after("populate-popup", lambda view, menu: menu.destroy()) # temporarily disable the context menu
        self.set_property('can-focus', False)
        pass
    def updatehtmllinks(self, html):
        """Load *html* (pre-processed by proc()) into the view.

        On the first call a full page load is needed because there is
        no document.body yet; the method then REPLACES ITSELF with a
        lambda that only swaps body.innerHTML via JavaScript, which
        preserves the scrollbar position on later updates.
        """
        self.load_html_string('''<html>
<head>
<style>
a:hover {
 font-weight: bold;
 border-bottom: 1px solid blue;
}
a {
 width: 90%%;
 text-decoration: none ;
 white-space: pre;
 display: block;
}
</style>
</head>
<body>
<code>%s</code>
</body>
</html>''' % proc(html), '') # on first run there is no document.body object yet
        self.updatehtmllinks = lambda html : self.execute_script('document.body.innerHTML="<code>%s</code>";' % stastr(proc(html))) # keeps the scrollbar position
        pass
    def on_navigation_requested(self, widget, WebKitWebFrame, WebKitNetworkRequest):
        """Emit url-clicked for anchor links; always cancel navigation
        (returning True stops WebKit from following the request)."""
        href = WebKitNetworkRequest.get_uri()
        if '#' in href:
            self.emit("url-clicked", href, "link")
            pass
        return True
if __name__ == "__main__":
    # Bug fix: the original called main(), which is not defined
    # anywhere in this module (NameError at runtime). Run a minimal
    # interactive demo of LinkTextView instead.
    win = gtk.Window()
    view = LinkTextView()
    view.updatehtmllinks(' <a href="#1">1 demo link</a>')
    win.add(view)
    win.connect("destroy", gtk.main_quit)
    win.show_all()
    gtk.main()
| Python |
#!/usr/bin/python
# -*- coding: UTF-8 -*-
'''Gtk 对话框
@author: U{Jiahua Huang <jhuangjiahua@gmail.com>}
@license: LGPLv3+
'''
import os
import gtk
import gobject
__all__ = ['error', 'info', 'inputbox', 'messagedialog', 'open', 'save', 'warning',
'yesno']
try: import i18n
except: from gettext import gettext as _
def colorbox(title="Changing color", previous_color='', current_color=''):
    '''Show a color-selection dialog and return the chosen color as an
    HTML "#rrggbb" string, or '' when the dialog is cancelled.

    previous_color / current_color are optional gtk.gdk.Color values
    used to pre-seed the selector.
    '''
    # bug fix: the title argument was ignored (hard-coded string)
    dialog = gtk.ColorSelectionDialog(title)
    colorsel = dialog.colorsel
    # bug fix: this branch tested current_color, so previous_color was
    # only applied when current_color happened to be set
    if previous_color:
        colorsel.set_previous_color(previous_color)
    if current_color:
        colorsel.set_current_color(current_color)
    colorsel.set_has_palette(True)
    response = dialog.run()
    htmlcolor = ''
    if response == gtk.RESPONSE_OK:
        color = colorsel.get_current_color()
        rgb = (color.red, color.green, color.blue)
        # gtk channels are 16-bit (0..65535); dividing by 257 maps them
        # exactly onto 0..255. // keeps this an integer division under
        # Python 3 as well (identical result on Python 2 ints).
        htmlcolor = '#' + ''.join((str(hex(i // 257))[2:].rjust(2, '0') for i in rgb))
    dialog.destroy()
    return htmlcolor
def textbox(title='Text Box', label='Text',
            parent=None, text=''):
    """Run a modal multi-line text editor dialog.

    Returns the edited text on OK, or None otherwise.
    """
    dialog = gtk.Dialog(title, parent, gtk.DIALOG_DESTROY_WITH_PARENT,
                        (gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
                         gtk.STOCK_OK, gtk.RESPONSE_OK ))
    dialog.set_default_size(500, 500)
    scroller = gtk.ScrolledWindow()
    scroller.set_shadow_type(gtk.SHADOW_IN)
    scroller.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
    editor = gtk.TextView(buffer=None)
    textbuf = editor.get_buffer()
    if text:
        textbuf.set_text(text)
    scroller.add(editor)
    dialog.vbox.pack_start(scroller)
    dialog.show_all()
    response = dialog.run()
    content = textbuf.get_text(textbuf.get_start_iter(), textbuf.get_end_iter())
    dialog.destroy()
    if response == gtk.RESPONSE_OK:
        return content
    return None
def combobox(title='ComboBox', label='ComboBox', parent=None, texts=['']):
    '''Run a modal dialog offering a drop-down list of *texts*.

    Returns the selected string, or None on cancel. (The mutable
    default argument is kept for interface parity.)
    '''
    dialog = gtk.Dialog(title, parent, gtk.DIALOG_DESTROY_WITH_PARENT,
                        (gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
                         gtk.STOCK_OK, gtk.RESPONSE_OK ))
    prompt = gtk.Label(label)
    prompt.set_alignment(0, 0.5)
    prompt.set_padding(5, 5)
    prompt.set_line_wrap(True)
    prompt.show()
    dialog.vbox.pack_start(prompt, False, False, 0)
    model = gtk.ListStore(gobject.TYPE_STRING)
    combo = gtk.ComboBox()
    combo.show()
    combo.set_model(model)
    renderer = gtk.CellRendererText()
    combo.pack_start(renderer, True)
    combo.add_attribute(renderer, 'text', 0)
    dialog.vbox.pack_start(combo, True, True, 0)
    for entry in texts:
        combo.append_text(entry)
    combo.set_active(0)
    response = dialog.run()
    choice = texts[combo.get_active()]
    dialog.destroy()
    if response == gtk.RESPONSE_CANCEL:
        return None
    return choice
def spinbox2(title='2 Spin Box', label1='value1:', label2='value2:',
             parent=None, value1=3, value2=3):
    """Run a modal dialog with two labelled spin buttons (range 1-512).

    Returns (value1, value2) as returned by SpinButton.get_value() on
    OK, or () on cancel.
    """
    dlg = gtk.Dialog(title, parent, gtk.DIALOG_DESTROY_WITH_PARENT,
                     (gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
                      gtk.STOCK_OK, gtk.RESPONSE_OK ))
    # header label repeats the dialog title inside the dialog body
    lbl = gtk.Label(title)
    lbl.set_alignment(0, 0.5)
    dlg.vbox.pack_start(lbl, False)
    #vbox1 = gtk.VBox(False, 0)
    #vbox1.show()
    #vbox1.set_spacing(0)
    # 2x2 table: one row per (label, spin button) pair
    table2 = gtk.Table()
    table2.show()
    table2.set_row_spacings(0)
    table2.set_col_spacings(0)
    label1 = gtk.Label(label1)
    label1.set_alignment(0, 0.5)
    label1.set_padding(0, 0)
    label1.set_line_wrap(False)
    label1.show()
    table2.attach(label1, 0, 1, 0, 1, gtk.FILL, 0, 0, 0)
    label2 = gtk.Label(label2)
    label2.set_alignment(0, 0.5)
    label2.set_padding(0, 0)
    label2.set_line_wrap(False)
    label2.show()
    table2.attach(label2, 0, 1, 1, 2, gtk.FILL, 0, 0, 0)
    # Adjustment(value, lower, upper, step, page, page_size)
    adj = gtk.Adjustment(1.0, 1.0, 512.0, 1.0, 5.0, 0.0)
    spin1=gtk.SpinButton(adj,0,0)
    if value1: spin1.set_value(value1)
    table2.attach(spin1, 1, 2, 0, 1, gtk.EXPAND|gtk.FILL, 0, 0, 0)
    adj2 = gtk.Adjustment(1.0, 1.0, 512.0, 1.0, 5.0, 0.0)
    spin2=gtk.SpinButton(adj2,0,0)
    if value2: spin2.set_value(value2)
    table2.attach(spin2, 1, 2, 1, 2, gtk.EXPAND|gtk.FILL, 0, 0, 0)
    #vbox1.pack_start(table2, True, True, 0)
    dlg.vbox.pack_start(table2)
    dlg.show_all()
    resp = dlg.run()
    value1=spin1.get_value()
    value2=spin2.get_value()
    # NOTE(review): hide() rather than destroy() — the dialog object
    # lingers until garbage collected
    dlg.hide()
    if resp == gtk.RESPONSE_CANCEL:
        return ()
    return (value1,value2)
def inputbox(title='Input Box', label='Please input the value',
             parent=None, text=''):
    """Run a modal single-line input dialog.

    Returns the entered text on OK, or None on cancel.
    """
    dlg = gtk.Dialog(title, parent, gtk.DIALOG_DESTROY_WITH_PARENT,
                     (gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
                      gtk.STOCK_OK, gtk.RESPONSE_OK ))
    lbl = gtk.Label(label)
    lbl.set_alignment(0, 0.5)
    lbl.show()
    dlg.vbox.pack_start(lbl)
    entry = gtk.Entry()
    if text: entry.set_text(text)
    # Resolves the old TODO ("confirm directly with Enter"): make the
    # Enter key activate the dialog's default (OK) response.
    entry.set_activates_default(True)
    entry.show()
    dlg.vbox.pack_start(entry, False)
    dlg.set_default_response(gtk.RESPONSE_OK)
    resp = dlg.run()
    text = entry.get_text()
    dlg.hide()
    if resp == gtk.RESPONSE_CANCEL:
        return None
    return text
def inputbox2(title='2 Input Box', label1='value1:', label2='value2:',
              parent=None, text1='', text2=''):
    """Run a modal dialog with two labelled text entries.

    Returns (text1, text2) on OK, or () on cancel.
    """
    # keep the second label text: the local name label2 is reused for
    # a gtk.Label widget below
    strlabel2 = label2
    dlg = gtk.Dialog(title, parent, gtk.DIALOG_DESTROY_WITH_PARENT,
                     (gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
                      gtk.STOCK_OK, gtk.RESPONSE_OK ))
    lbl = gtk.Label(title)
    lbl.set_alignment(0, 0.5)
    dlg.vbox.pack_start(lbl, False)
    # 2x2 table: one row per (label, entry) pair
    table1 = gtk.Table()
    table1.show()
    table1.set_row_spacings(0)
    table1.set_col_spacings(0)
    label2 = gtk.Label(label1)
    label2.set_alignment(0.5, 0.5)
    label2.set_padding(0, 0)
    label2.set_line_wrap(False)
    label2.show()
    table1.attach(label2, 0, 1, 0, 1, gtk.FILL, 0, 0, 0)
    entry2 = gtk.Entry()
    entry2.set_text("")
    entry2.set_editable(True)
    entry2.show()
    entry2.set_visibility(True)
    table1.attach(entry2, 1, 2, 0, 1, gtk.EXPAND|gtk.FILL, 0, 0, 0)
    label3 = gtk.Label(strlabel2)
    label3.set_alignment(0, 0.5)
    label3.set_padding(0, 0)
    label3.set_line_wrap(False)
    label3.show()
    table1.attach(label3, 0, 1, 1, 2, gtk.FILL, 0, 0, 0)
    entry3 = gtk.Entry()
    entry3.set_text("")
    entry3.set_editable(True)
    entry3.show()
    entry3.set_visibility(True)
    table1.attach(entry3, 1, 2, 1, 2, gtk.EXPAND|gtk.FILL, 0, 0, 0)
    if text1: entry2.set_text(text1)
    if text2: entry3.set_text(text2)
    dlg.vbox.pack_start(table1)
    dlg.set_default_response(gtk.RESPONSE_OK)
    dlg.show_all()
    resp = dlg.run()
    text1 = entry2.get_text()
    text2 = entry3.get_text()
    # NOTE(review): hide() rather than destroy() — dialog lingers
    dlg.hide()
    if resp == gtk.RESPONSE_CANCEL:
        return ()
    return (text1,text2)
def savechanges(text=_("Save Changes?"), parent=None):
    '''Ask the user whether to save pending changes.

    Returns 1 (yes), -1 (no) or 0 (cancel).
    '''
    dialog = gtk.MessageDialog(parent=parent, flags=gtk.DIALOG_MODAL,
                               type=gtk.MESSAGE_INFO,)
    dialog.add_buttons(gtk.STOCK_YES, 1, gtk.STOCK_NO, -1, gtk.STOCK_CANCEL, 0)
    dialog.set_markup(text)
    dialog.show_all()
    response = dialog.run()
    dialog.destroy()
    return response
def infotablebox(title=_("Info"), short=_("Info"), info=[[_("Key:"), _("Value")]], parent=None):
    '''Show a dialog with a heading (*short*, Pango markup) and a table
    built from *info*, a list of rows of markup strings.

    Returns the dialog response code.
    '''
    dlg = gtk.Dialog(title, parent, gtk.DIALOG_DESTROY_WITH_PARENT,
                     (gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
                      gtk.STOCK_OK, gtk.RESPONSE_OK ))
    label = gtk.Label()
    label.set_markup(short)
    label.set_padding(20, 10)
    label.set_alignment(0, 0)
    label.show()
    dlg.vbox.pack_start(label, False, False, 0)
    ##
    table = gtk.Table()
    table.show()
    # fill the table row by row; the first column is left-aligned (0),
    # the rest right-aligned (1)
    y = 0
    for line in info:
        x = 0
        left = 0
        for text in line:
            label = gtk.Label()
            #label.set_selectable(1) # would interfere with the editor's selection state
            label.set_padding(10, 3)
            label.set_alignment(left, 0)
            label.set_markup("%s" % text)
            label.show()
            table.attach(label, x, x+1, y, y+1,)
            x += 1
            left = 1
            pass
        y += 1
        pass
    dlg.vbox.pack_start(table, False, False, 5)
    response = dlg.run()
    dlg.destroy()
    return response
def messagedialog(dialog_type, short, long=None, parent=None,
                  buttons=gtk.BUTTONS_OK, additional_buttons=None):
    """Show a modal message dialog and return its response code.

    *short* is the markup headline. *long*, when given, is either a
    gtk.Widget or a markup string and is placed inside a collapsed
    "details" expander.
    """
    dialog = gtk.MessageDialog(parent=parent, flags=gtk.DIALOG_MODAL,
                               type=dialog_type, buttons=buttons)
    if additional_buttons:
        dialog.add_buttons(*additional_buttons)
    dialog.set_markup(short)
    if long:
        if isinstance(long, gtk.Widget):
            details = long
        elif isinstance(long, basestring):
            details = gtk.Label()
            details.set_markup(long)
        else:
            raise TypeError("long must be a gtk.Widget or a string")
        expander = gtk.Expander(_("Click here for details"))
        expander.set_border_width(6)
        expander.add(details)
        dialog.vbox.pack_end(expander)
    dialog.show_all()
    response = dialog.run()
    dialog.destroy()
    return response
def error(short, long=None, parent=None):
    """Show a modal error dialog; see messagedialog() for arguments."""
    return messagedialog(gtk.MESSAGE_ERROR, short, long=long, parent=parent)
def info(short, long=None, parent=None):
    """Show a modal info dialog; see messagedialog() for arguments."""
    return messagedialog(gtk.MESSAGE_INFO, short, long=long, parent=parent)
def warning(short, long=None, parent=None):
    """Show a modal warning dialog; see messagedialog() for arguments."""
    return messagedialog(gtk.MESSAGE_WARNING, short, long=long, parent=parent)
def yesno(text="OK ?", parent=None):
    """Ask a modal yes/no question.

    Returns 1 for yes, 0 for anything else.
    """
    response = messagedialog(gtk.MESSAGE_INFO, text, None, parent,
                             buttons=gtk.BUTTONS_YES_NO)
    # Use the named constant instead of the magic number -8
    # (gtk.RESPONSE_YES == -8), same comparison as before.
    if response == gtk.RESPONSE_YES:
        return 1
    return 0
def open(title='', parent=None,
         patterns=[], mimes=[], name_mimes=[], name_patterns=[], folder=None):
    """Display a file-open dialog with optional filters.

    patterns: glob patterns for a single unnamed filter;
    mimes: mime types for a single unnamed filter;
    name_mimes / name_patterns: (name, mime-or-pattern) pairs, each
    becoming its own named filter (an "All Files" pattern is appended
    automatically when name_mimes is used).

    Returns the chosen full path, or None on cancel.
    NOTE(review): when the chosen file exists but is not readable an
    error dialog is shown and the (unreadable) path is STILL returned
    — callers must handle that. Also note this shadows the builtin
    open() inside this module.
    """
    filechooser = gtk.FileChooserDialog(title or _('Open'),
                                        parent,
                                        gtk.FILE_CHOOSER_ACTION_OPEN,
                                        (gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
                                         gtk.STOCK_OPEN, gtk.RESPONSE_OK))
    if patterns:
        file_filter = gtk.FileFilter()
        for pattern in patterns:
            file_filter.add_pattern(pattern)
        filechooser.set_filter(file_filter)
        pass
    if mimes:
        file_filter = gtk.FileFilter()
        for mime in mimes:
            file_filter.add_mime_type(mime)
        filechooser.add_filter(file_filter)
        pass
    if name_mimes:
        for name, mime in name_mimes:
            file_filter = gtk.FileFilter()
            file_filter.set_name(name)
            file_filter.add_mime_type(mime)
            filechooser.add_filter(file_filter)
        if not "*" in [ i[1] for i in name_patterns]:
            name_patterns += [[_("All Files"), "*"]]
        pass
    for name, pattern in name_patterns:
        file_filter = gtk.FileFilter()
        file_filter.set_name(name)
        file_filter.add_pattern(pattern)
        filechooser.add_filter(file_filter)
    filechooser.set_default_response(gtk.RESPONSE_OK)
    if folder:
        filechooser.set_current_folder(folder)
    response = filechooser.run()
    if response != gtk.RESPONSE_OK:
        filechooser.destroy()
        return
    path = filechooser.get_filename()
    if path and os.access(path, os.R_OK):
        filechooser.destroy()
        return path
    # unreadable file: complain, but still fall through to return path
    abspath = os.path.abspath(path)
    error(_('Could not open file "%s"') % abspath,
          _('The file "%s" could not be opened. '
            'Permission denied.') % abspath)
    filechooser.destroy()
    return path
def save(title='', parent=None, current_name='',
         patterns=[], mimes=[], name_mimes=[], name_patterns=[], folder=None):
    """Display a file-save dialog with optional filters (same filter
    arguments as open()).

    When the chosen file already exists the user is asked whether to
    replace it; the chooser re-runs until they confirm or cancel.
    Returns the chosen full path, or None on cancel.
    """
    filechooser = gtk.FileChooserDialog(title or _('Save'),
                                        parent,
                                        gtk.FILE_CHOOSER_ACTION_SAVE,
                                        (gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
                                         gtk.STOCK_SAVE, gtk.RESPONSE_OK))
    if patterns:
        file_filter = gtk.FileFilter()
        for pattern in patterns:
            file_filter.add_pattern(pattern)
        filechooser.set_filter(file_filter)
        pass
    if mimes:
        file_filter = gtk.FileFilter()
        for mime in mimes:
            file_filter.add_mime_type(mime)
        filechooser.add_filter(file_filter)
        pass
    if name_mimes:
        for name, mime in name_mimes:
            file_filter = gtk.FileFilter()
            file_filter.set_name(name)
            file_filter.add_mime_type(mime)
            filechooser.add_filter(file_filter)
        if not "*" in [ i[1] for i in name_patterns]:
            name_patterns += [[_("All Files"), "*"]]
        pass
    for name, pattern in name_patterns:
        file_filter = gtk.FileFilter()
        file_filter.set_name(name)
        file_filter.add_pattern(pattern)
        filechooser.add_filter(file_filter)
    if current_name:
        filechooser.set_current_name(current_name)
    filechooser.set_default_response(gtk.RESPONSE_OK)
    if folder:
        filechooser.set_current_folder(folder)
    path = None
    while True:
        response = filechooser.run()
        if response != gtk.RESPONSE_OK:
            path = None
            break
        path = filechooser.get_filename()
        if not os.path.exists(path):
            break
        submsg1 = _('A file named "%s" already exists') % os.path.abspath(path)
        # fixed user-facing typo: "Do you which" -> "Do you wish"
        submsg2 = _('Do you wish to replace it with the current project?')
        text = '<span weight="bold" size="larger">%s</span>\n\n%s\n' % \
               (submsg1, submsg2)
        result = messagedialog(gtk.MESSAGE_ERROR,
                               text,
                               parent=parent,
                               buttons=gtk.BUTTONS_NONE,
                               additional_buttons=(gtk.STOCK_CANCEL,
                                                   gtk.RESPONSE_CANCEL,
                                                   _("Replace"),
                                                   gtk.RESPONSE_YES))
        # the user wants to overwrite the file
        if result == gtk.RESPONSE_YES:
            break
    filechooser.destroy()
    return path
def test():
    '''Interactive smoke tests for the dialog helpers; most calls are
    kept commented out — uncomment individual lines to try them.'''
    #globals()['_'] = lambda s: s
    #-print combobox(title='ComboBox', label='Combo', texts=['11','22','33'])
    #-print spinbox2(title='Select the values',label1='cows:',value1=4, label2='rows:',value2=4)
    #-print textbox(title='Edit The Text',label='Text',text='test text in textbox')
    #-print inputbox(title='Input a Value',label='Input a value')
    #-print inputbox2(title='Name and Host',label1='name:',text1='vgh', label2='host:',text2='/')
    #print open(title='Open a file', patterns=['*.py'])
    #-print open(title='Open a file', name_mimes={"Python Script":"text/x-python"})
    #print save(title='Save a file', current_name='foobar.py')
    #-print save(title='Save a file', current_name='foobar.py', name_mimes={"Python Script":"text/x-python"})
    #-print info(short='This is a InfoBox', long='the long message')
    #-print yesno(text='Are you OK?')
    #-print savechanges()
    # error() accepts either a widget or a string as the details pane:
    error('An error occurred', gtk.Button('Woho'))
    error('An error occurred',
          'Long description bla bla bla bla bla bla bla bla bla\n'
          'bla bla bla bla bla lblabl lablab bla bla bla bla bla\n'
          'lbalbalbl alabl l blablalb lalba bla bla bla bla lbal\n')
if __name__ == '__main__':
    # run the interactive dialog demos when executed as a script
    test()
| Python |
#!/usr/bin/python
# -*- coding: UTF-8 -*-
'''Gtk LaTex
@author: U{Jiahua Huang <jhuangjiahua@gmail.com>}
@license: LGPLv3+
'''
import gtk, gobject
import thread
import time
import subprocess
import os, sys
import base64
try: import gtksourceview2
except: gtksourceview2 = None
try: import i18n
except: from gettext import gettext as _
# (button-label, LaTeX-snippet) pairs for the formula editor's palette.
# Labels use Pango markup; a snippet containing exactly one "%s" is
# filled with the current editor selection by insert_latex_mark(),
# all other snippets are appended after the selection.
latex_mark_list = [
    # ["+", r" + "],
    # ["<big>-</big>", r" - "],
    ["<b>⋅</b>", r" \cdot "],
    ["x", r" \times "],
    ["/", r" / "],
    ["<big><b>÷</b></big>", r" \frac { } { }"],
    ["a<sup>n</sup>", r"^{%s}"],
    ["a<sub>n</sub>", r"_{%s}"],
    [" ≠ ", r" \neq "],
    [" ≤ ", r" \le "],
    [" ≥ ", r" \ge "],
    [" ≡ ", r" \equiv "],
    [" ≪ ", r" \ll "],
    [" ≫ ", r" \gg "],
    [" ≃ ", r" \simeq "],
    [" ≈ ", r" \approx "],
    ["√¯", r" \sqrt[] {%s}"],
    ["∫", r" \int^{}_{} "],
    ["∬", r" \iint^{}_{} "],
    ["∮", r" \oint^{}_{} "],
    ["[ ]", r"\[ %s \]"],
    ["( )", r"\( %s \)"],
    ["{ }", r"\{ %s \}"],
    ["[≡]", r"""
\[
\begin{matrix}
a & b & c\\
c & e & f
\end{matrix}
\]
"""],
    ["(≡)", r"""
\begin{pmatrix}
a & b & c\\
c & e & f
\end{pmatrix}
"""],
    ["(<big> : </big>)", r"{ } \choose { } "],
    ["<big>(</big> x <big>)</big>", r"\left( { %s } \right)"],
    [" ± ", r" \pm "],
    [" ∓ ", r" \mp "],
    [" ∨ ", r" \lor" ],
    [" ∧ ", r" \land "],
    ["mod", r" \bmod "],
    [" ∼ ", r" \sim "],
    ["∥ ", r" \parallel "],
    [" ⊥ ", r" \perp "],
    ["<big><big>∞</big></big>", r" \infty "],
    ["∠", r" \angle "],
    ["<big><b>△</b></big>", r" \triangle "],
    ["∑", r" \sum_{ }^{ } "],
    ["lim", r"\lim_{ }"],
    ["⇒", r" \Rightarrow "],
    ["⇔", r" \Leftrightarrow "],
    ["∧", r" \wedge "],
    ["∨", r" \vee "],
    ["¬", r" \neg "],
    ["∀", r" \forall "],
    ["∃", r" \exists "],
    ["∅", r" \varnothing "],
    ["∈", r" \in "],
    ["∉", r" \notin "],
    ["⊆", r" \subseteq "],
    ["⊂", r" \subset "],
    ["∪", r" \cup "],
    ["⋂", r" \cap "],
    ["→", r" \to "],
    ["↦", r" \mapsto "],
    ["∏", r" \prod "],
    ["○", r" \circ "],
    ["sin", r" \sin "],
    ["cos", r" \cos "],
    ["tan", r" \tan "],
    # NOTE(review): \ctab, \asin, \acos, \atan, \actan are not standard
    # LaTeX commands (standard names are \cot, \arcsin, \arccos,
    # \arctan) — verify against mimetex's supported command set.
    ["ctan", r" \ctab "],
    ["asin", r" \asin "],
    ["acos", r" \acos "],
    ["atan", r" \atan "],
    ["actan", r" \actan "],
    ["log", r" \log "],
    ["ln", r" \ln "],
    ["...", r" \cdots "],
    [" <sub>...</sub> ", r" \ldots "],
    ["<big>⁝</big>", r" \vdots "],
    ["<sup>.</sup>.<sub>.</sub>", r" \ddots "],
    ["α", r" \alpha "],
    ["β", r" \beta "],
    ["Γ", r" \Gamma "],
    ["γ", r" \gamma "],
    ["Δ", r" \Delta "],
    ["δ", r" \delta "],
    ["ϵ", r" \epsilon "],
    ["ε", r" \varepsilon "],
    ["ζ", r" \zeta "],
    ["η", r" \eta "],
    ["Θ", r" \Theta "],
    ["θ", r" \theta "],
    ["ϑ", r" \vartheta "],
    ["ι", r" \iota "],
    ["κ", r" \kappa "],
    ["Λ", r" \Lambda "],
    ["λ", r" \lambda "],
    ["μ", r" \mu "],
    ["ν", r" \nu "],
    ["Ξ", r" \Xi "],
    ["ξ", r" \xi "],
    ["Π", r" \Pi "],
    ["π", r" \pi "],
    ["ϖ", r" \varpi "],
    ["ρ", r" \rho "],
    ["ϱ", r" \varrho "],
    ["Σ", r" \Sigma "],
    ["σ", r" \sigma "],
    ["ς", r" \varsigma "],
    ["τ", r" \tau "],
    ["Υ", r" \Upsilon "],
    ["υ", r" \upsilon "],
    ["Φ", r" \Phi "],
    ["ϕ", r" \phi "],
    ["φ", r" \varphi "],
    ["χ", r" \chi "],
    ["Ψ", r" \Psi "],
    ["ψ", r" \psi "],
    ["Ω", r" \Omega "],
    ["ω", r" \omega "],
    ]
class GtkToolBoxView(gtk.TextView):
    '''A flow-layout tool box: child widgets are inserted as inline
    child anchors in a wrapping, read-only TextView, so buttons reflow
    like words in a paragraph.
    '''
    def __init__(self, latex=""):
        '''Initialize the read-only, word-wrapping, non-focusable view.'''
        self.__gobject_init__()
        self.unset_flags(gtk.CAN_FOCUS)
        self.set_editable(0)
        self.set_wrap_mode(gtk.WRAP_WORD)
        self.connect('realize', self.on_realize)
        pass
    def on_realize(self, *args):
        ## replace the default I-beam text cursor with an arrow
        self.get_window(gtk.TEXT_WINDOW_TEXT).set_cursor(gtk.gdk.Cursor(gtk.gdk.ARROW))
        pass
    def add(self, widget):
        '''Insert *widget* inline at the end of the text buffer.'''
        buffer = self.get_buffer()
        iter = buffer.get_end_iter()
        anchor = buffer.create_child_anchor(iter)
        buffer.insert(iter, "")
        # remember the anchor so remove() can locate the widget later
        widget.set_data('buffer_anchor', anchor)
        self.add_child_at_anchor(widget, anchor)
        pass
    def remove(self, widget):
        '''Remove a widget previously inserted with add(), deleting its
        one-character anchor from the buffer.'''
        anchor = widget.get_data('buffer_anchor')
        if anchor:
            buffer = self.get_buffer()
            start = buffer.get_iter_at_child_anchor(anchor)
            end = buffer.get_iter_at_offset( start.get_offset() + 1 )
            buffer.delete(start, end)
            pass
        pass
class LatexMathExpressionsEditor(gtk.Table):
    '''LaTeX math formula editor widget (a gtk.Table).

    Layout:
      - top-left: source editor (gtksourceview2 with LaTeX syntax
        highlighting when available, plain gtk.TextView otherwise)
      - bottom-left: rendered preview image (via mimetex, see tex2gif)
      - right: flow-layout palette of LaTeX snippet buttons
    A background thread re-renders the preview about once a second.
    '''
    def __init__(self, latex=""):
        '''Build the widget tree and start the preview thread.'''
        self.__gobject_init__()
        self.set_row_spacings(10)
        self.set_col_spacings(10)
        ## latex edit
        scrolledwindow1 = gtk.ScrolledWindow()
        scrolledwindow1.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
        scrolledwindow1.show()
        scrolledwindow1.set_shadow_type(gtk.SHADOW_IN)
        if gtksourceview2:
            self.latex_textview = gtksourceview2.View()
            lm = gtksourceview2.language_manager_get_default()
            language = lm.get_language('latex')
            buffer = gtksourceview2.Buffer()
            buffer.set_highlight_syntax(1)
            buffer.set_language(language)
            self.latex_textview.set_buffer(buffer)
            pass
        else:
            self.latex_textview = gtk.TextView()
            pass
        self.latex_textview.set_wrap_mode(gtk.WRAP_WORD)
        self.latex_textview.set_cursor_visible(True)
        self.latex_textview.set_indent(5)
        self.latex_textview.set_editable(True)
        self.latex_textview.show()
        #self.latex_textview.set_size_request(302, 200)
        buffer = self.latex_textview.get_buffer()
        buffer.set_text(latex)
        scrolledwindow1.add(self.latex_textview)
        self.attach(scrolledwindow1, 0, 1, 0, 1)
        ## latex preview
        self.latex_image = gtk.Image()
        #self.latex_image.set_size_request(200, 100)
        self.latex_image.set_padding(0, 0)
        self.latex_image.show()
        # EventBox gives the preview a white background
        box = gtk.EventBox()
        box.show()
        box.modify_bg(gtk.STATE_NORMAL, gtk.gdk.Color("#FFFFFF"))
        box.add(self.latex_image)
        self.attach(box, 0, 1, 1, 2)
        ## toolbox
        toolview = GtkToolBoxView()
        toolview.show()
        #toolview.set_size_request(302, 200)
        for text, mark in latex_mark_list:
            label = gtk.Label()
            label.set_markup(text)
            label.set_size_request(30, 20)
            label.show()
            button = gtk.Button()
            button.unset_flags(gtk.CAN_FOCUS)
            button.add(label)
            button.set_relief(gtk.RELIEF_NONE)
            button.connect("clicked", self.on_insert_tex_mark, text, mark)
            button.set_tooltip_text(mark)
            button.show()
            toolview.add(button)
            pass
        scrolledwindow2 = gtk.ScrolledWindow()
        #scrolledwindow2.set_size_request(300, 400)
        scrolledwindow2.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
        scrolledwindow2.show()
        scrolledwindow2.set_shadow_type(gtk.SHADOW_IN)
        scrolledwindow2.add(toolview)
        self.attach(scrolledwindow2, 1, 2, 0, 2)
        self.show_all()
        # background preview-refresh loop; exits when the widget is
        # destroyed (see _up_preview)
        thread.start_new_thread(self._up_preview, ())
        pass
    def get_latex(self, *args):
        '''Return the current LaTeX source from the editor buffer.'''
        buffer = self.latex_textview.get_buffer()
        return buffer.get_text(buffer.get_start_iter(),buffer.get_end_iter())
    def set_pic(self, data):
        '''Set the preview image from GIF bytes *data*; shows a stock
        error icon when *data* is empty (render failure).'''
        if not data:
            return self.latex_image.set_from_stock(gtk.STOCK_DIALOG_ERROR, 2)
        pix = gtk.gdk.PixbufLoader()
        pix.write(data)
        pix.close()
        self.latex_image.set_from_pixbuf(pix.get_pixbuf())
        return
    def _up_preview(self, *args):
        '''Preview updater loop, run in a background thread: polls the
        editor once per second, re-renders via tex2gif when the text
        changed, and pushes the image to the GUI thread with
        gobject.idle_add. Exits once the widget has no GdkWindow.'''
        old_latex = ""
        while True:
            time.sleep(1)
            if not self.get_window():
                break
            latex = self.get_latex()
            if latex == old_latex:
                continue
            pic = tex2gif(latex, 1)
            old_latex = self.get_latex()
            # only apply the picture if the text did not change while
            # mimetex was running
            if latex == self.get_latex():
                gobject.idle_add(self.set_pic, pic)
                pass
            pass
        #-print 'done'
        return
    def up_preview(self, pic):
        '''Update the preview (unused stub).'''
        return
    def insert_latex_mark(self, view, mark, text=""):
        '''Insert LaTeX snippet *mark* into *view* (a gtk.TextView):
        a single %s in the mark is replaced with the current selection
        (or *text*), otherwise the selection is appended to the mark;
        the selection is then replaced by the result.'''
        buffer = view.get_buffer()
        bounds = buffer.get_selection_bounds()
        select = bounds and buffer.get_slice(bounds[0], bounds[1]) or text
        if mark.count("%") == 1:
            mark = mark % select
            pass
        else:
            mark = mark + select
            pass
        buffer.delete_selection(1, 1)
        buffer.insert_at_cursor(mark)
        pass
    def on_insert_tex_mark(self, widget, text, mark):
        '''Palette button handler: insert the button's LaTeX snippet.'''
        print 'on_insert_tex_mark:', text, mark
        self.insert_latex_mark(self.latex_textview, mark)
        pass
def latex_dlg(latex="", title=_("LaTeX math expressions"), parent=None):
    '''Run a modal LaTeX formula editor dialog.

    Returns the edited LaTeX source on OK, or None on cancel.
    '''
    dialog = gtk.Dialog(title, parent, gtk.DIALOG_DESTROY_WITH_PARENT,
                        (gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
                         gtk.STOCK_OK, gtk.RESPONSE_OK ))
    dialog.set_default_size(680, 400)
    editor = LatexMathExpressionsEditor(latex)
    dialog.vbox.pack_start(editor, True, True, 5)
    dialog.show_all()
    response = dialog.run()
    result = editor.get_latex()
    dialog.destroy()
    return result if response == gtk.RESPONSE_OK else None
def stastr(stri):
    '''Escape *stri* for embedding inside a quoted JavaScript string
    (duplicate of the helper in the webkit link-view module):
    backslashes, both quote characters and newlines are escaped, in
    that order.
    '''
    replacements = (('\\', '\\\\'), ('"', '\\"'), ("'", "\\'"), ('\n', '\\n'))
    for raw, escaped in replacements:
        stri = stri.replace(raw, escaped)
    return stri
def tex2gif(tex, transparent=1):
    '''Render a LaTeX math expression to GIF bytes via the external
    mimetex tool ("" on failure).

    transparent: when falsy, mimetex's -o flag is added (opaque
    background).
    '''
    cmd = ['mimetex', '-d']
    if not transparent:
        cmd.append('-o')
    cmd += ['-s', '4', tex]
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
    out = proc.communicate()[0]
    # a valid render starts with the GIF magic bytes
    return out if out.startswith('GIF') else ""
def gif2base64(gif):
    '''Return GIF bytes *gif* as an inline "data:image/gif;base64,..."
    URI.

    Uses base64.b64encode (which emits no newlines) instead of the
    deprecated-and-removed base64.encodestring + newline stripping;
    the explicit ascii decode keeps the result a text string on both
    Python 2 and 3.
    '''
    return 'data:image/gif;base64,%s' % base64.b64encode(gif).decode('ascii')
def tex2base64(tex):
    '''Render LaTeX math *tex* to an inline base64 GIF data URI.'''
    gif = tex2gif(tex)
    return gif2base64(gif)
def tex2html(tex):
    '''Render LaTeX math *tex* as an <img> tag whose src is an inline
    base64 GIF; the escaped source is kept in the alt attribute so the
    editor can recover and re-edit the formula (uptex hook).'''
    alt = stastr(tex)
    src = gif2base64(tex2gif(tex))
    template = ('<img alt="mimetex:%s" onDblClick="if(uptex) uptex(this);" '
                'style="vertical-align: middle; position: relative; top: -5pt; '
                'border: 0;" src="%s" />')
    return template % (alt, src)
if __name__=="__main__":
    # Standalone demo: edit the LaTeX expression given on the command
    # line (default E=MC^2) in the dialog and print the result.
    gtk.gdk.threads_init()
    latex = ' '.join(sys.argv[1:]) or 'E=MC^2'
    latex = latex_dlg(latex)
    print latex
    #print tex2html(latex)
    pass
| Python |
#!/usr/bin/python
# -*- coding: UTF-8 -*-
'''GWrite
@author: U{Jiahua Huang <jhuangjiahua@gmail.com>}
@license: LGPLv3+
'''
__version__ = '0.5.1'
import gtk, gobject
import gtkdialogs
import gtklatex
import config
import os, sys
import thread
import re
import urllib2
try: import i18n
except: from gettext import gettext as _
def get_doctitle(html):
    r'''Return the contents of the <title>...</title> element of
    *html*, or the translated "NewDocument" fallback when there is
    none.

    NOTE: the greedy [^\0]* pattern also matches newlines, and with
    multiple <title> tags captures through the LAST closing tag.
    '''
    # (dropped a dead `title = ''` assignment that was immediately
    # overwritten in the original)
    return (re.findall(r'''<title>([^\0]*)</title>''', html) + [_("NewDocument")])[0]
def proc_webkit_color(*webviews):
    """Make the given WebKit views' page colors follow the current Gtk
    theme by writing a tiny user stylesheet and pointing each view's
    'user-stylesheet-uri' setting at it.

    The theme colors are read from the first view's style; the
    stylesheet is written to config.user_stylesheet_file.
    """
    ## generate a stylesheet so the WebKit background uses Gtk colors
    style = webviews[0].get_style()
    html_bg_color = str(style.base[gtk.STATE_NORMAL])
    html_fg_color = str(style.text[gtk.STATE_NORMAL])
    user_stylesheet = ''' html {
    background-color: %s;
    color: %s;\n}''' % (html_bg_color, html_fg_color)
    user_stylesheet_file = config.user_stylesheet_file
    # was: file(...).write(...) — the py2-only file() builtin and an
    # unclosed handle; open() in a with-block guarantees the flush.
    with open(user_stylesheet_file, 'w') as f:
        f.write(user_stylesheet)
    user_stylesheet_uri = 'file://' + user_stylesheet_file
    for webview in webviews:
        settings = webview.get_settings()
        settings.set_property('user-stylesheet-uri', user_stylesheet_uri)
        pass
def menu_find_with_stock(menu, stock):
    """Return the index of the first item in *menu* whose stock image
    id equals *stock*, or -1 when no item matches.
    """
    for n, item in enumerate(menu.get_children()):
        try:
            if item.get_image().get_stock()[0] == stock:
                return n
        except Exception:
            # Items without a stock image raise here (e.g. get_image()
            # returns None); treat them as non-matching. Narrowed from
            # a bare `except:` that also swallowed KeyboardInterrupt /
            # SystemExit.
            pass
    return -1
# module-level registry of open MainWindow instances (used to detect
# an already-opened file and re-present its window)
Windows = []
# counter for naming new untitled documents
# NOTE(review): not visibly incremented within this chunk — confirm
# against the rest of the module
new_num = 1
Title = _("GWrite")
## single-instance mode switches (currently disabled)
#single_instance_mode = 0
#mdi_mode = 1
class MainWindow:
def __init__(self, editfile='', create = True, accel_group = None):
self.editfile = editfile
## 考虑已经打开文档的情况
if editfile:
for i in Windows:
if i.editfile == editfile:
#-print _('File "%s" already opened') % editfile
i.window.show()
i.window.present()
#@TODO: 让 edit 获得焦点
i.window.grab_focus()
i.edit.grab_focus()
del self
return
pass
pass
##
Windows.append(self)
import webkitedit # 推迟 import webkitedit
##
if accel_group is None:
self.accel_group = gtk.AccelGroup()
else:
self.accel_group = accel_group
if create:
self.window = gtk.Window()
gtk.window_set_default_icon_name("gtk-dnd")
self.window.set_icon_name("gtk-dnd")
self.window.set_default_size(780, 550)
self.window.set_title(Title)
if editfile: self.window.set_title(os.path.basename(self.editfile) + ' - ' + Title)
self.window.add_accel_group(self.accel_group)
self.window.show()
self.window.connect("delete_event", self.on_close)
## 用 Alt-1, Alt-2... 来切换标签页,gtk.gdk.MOD1_MASK 是 Alt
for k in range(1, 10):
self.accel_group.connect_group(gtk.gdk.keyval_from_name(str(k)), gtk.gdk.MOD1_MASK, gtk.ACCEL_VISIBLE, self.on_accel_connect_group)
pass
self.vbox1 = gtk.VBox(False, 0)
self.vbox1.show()
menubar1 = gtk.MenuBar()
menubar1.show()
menuitem_file = gtk.MenuItem(_("_File"))
menuitem_file.show()
menu_file = gtk.Menu()
menu_file.append(gtk.TearoffMenuItem())
self.menu_file = menu_file
menuitem_new = gtk.ImageMenuItem("gtk-new")
menuitem_new.show()
menuitem_new.connect("activate", self.on_new)
menuitem_new.add_accelerator("activate", self.accel_group, gtk.gdk.keyval_from_name("n"), gtk.gdk.CONTROL_MASK, gtk.ACCEL_VISIBLE)
menu_file.append(menuitem_new)
if config.mdi_mode:
menuitem_new_window = gtk.ImageMenuItem(_("New _Window"))
menuitem_new_window.show()
img = gtk.image_new_from_stock(gtk.STOCK_NEW, gtk.ICON_SIZE_MENU)
menuitem_new_window.set_image(img)
menuitem_new_window.connect("activate", self.on_new_window)
menuitem_new_window.add_accelerator("activate", self.accel_group, gtk.gdk.keyval_from_name("n"), gtk.gdk.CONTROL_MASK | gtk.gdk.SHIFT_MASK, gtk.ACCEL_VISIBLE)
menu_file.append(menuitem_new_window)
pass
menuitem_open = gtk.ImageMenuItem("gtk-open")
menuitem_open.show()
menuitem_open.connect("activate", self.on_open)
menuitem_open.add_accelerator("activate", self.accel_group, gtk.gdk.keyval_from_name("o"), gtk.gdk.CONTROL_MASK, gtk.ACCEL_VISIBLE)
menu_file.append(menuitem_open)
menuitem_save = gtk.ImageMenuItem("gtk-save")
menuitem_save.show()
menuitem_save.connect("activate", self.on_save)
menuitem_save.add_accelerator("activate", self.accel_group, gtk.gdk.keyval_from_name("s"), gtk.gdk.CONTROL_MASK, gtk.ACCEL_VISIBLE)
menu_file.append(menuitem_save)
menuitem_save_as = gtk.ImageMenuItem("gtk-save-as")
menuitem_save_as.show()
menuitem_save_as.connect("activate", self.on_save_as)
menu_file.append(menuitem_save_as)
menu_file.append(gtk.MenuItem())
menuitem = gtk.ImageMenuItem("gtk-properties")
menuitem.show()
menuitem.connect("activate", self.on_word_counts)
menu_file.append(menuitem)
menuitem_print = gtk.ImageMenuItem("gtk-print")
menuitem_print.show()
menuitem_print.connect("activate", self.on_print)
menuitem_print.add_accelerator("activate", self.accel_group, gtk.gdk.keyval_from_name("p"), gtk.gdk.CONTROL_MASK, gtk.ACCEL_VISIBLE)
menu_file.append(menuitem_print)
menu_file.append(gtk.MenuItem())
## 最近使用文件菜单 ################
self.recent = gtk.RecentManager()
menu_recent = gtk.RecentChooserMenu(self.recent)
menu_recent.set_limit(25)
#if editfile: self.add_recent(editfile) #改在 new_edit() 里统一添加
##
self.file_filter = gtk.RecentFilter()
self.file_filter.add_mime_type("text/html")
menu_recent.set_filter(self.file_filter)
menu_recent.connect("item-activated", self.on_select_recent)
menuitem_recent = gtk.ImageMenuItem(_("_Recently"))
menuitem_recent.set_image(gtk.image_new_from_icon_name("document-open-recent", gtk.ICON_SIZE_MENU))
menuitem_recent.set_submenu(menu_recent)
menu_file.append(menuitem_recent)
#####################################
menuitem_separatormenuitem1 = gtk.MenuItem()
menuitem_separatormenuitem1.show()
menu_file.append(menuitem_separatormenuitem1)
menuitem_close = gtk.ImageMenuItem("gtk-close")
menuitem_close.show()
menuitem_close.connect("activate", self.close_tab)
menuitem_close.add_accelerator("activate", self.accel_group, gtk.gdk.keyval_from_name("w"), gtk.gdk.CONTROL_MASK, gtk.ACCEL_VISIBLE)
menu_file.append(menuitem_close)
if config.mdi_mode:
menuitem_close_window = gtk.ImageMenuItem(_("Close Win_dow"))
menuitem_close_window.show()
img = gtk.image_new_from_stock(gtk.STOCK_CLOSE, gtk.ICON_SIZE_MENU)
menuitem_close_window.set_image(img)
menuitem_close_window.connect("activate", self.on_close)
menuitem_close_window.add_accelerator("activate", self.accel_group, gtk.gdk.keyval_from_name("w"), gtk.gdk.CONTROL_MASK | gtk.gdk.SHIFT_MASK, gtk.ACCEL_VISIBLE)
menu_file.append(menuitem_close_window)
pass
menuitem_quit = gtk.ImageMenuItem("gtk-quit")
menuitem_quit.show()
menuitem_quit.connect("activate", self.on_quit)
menuitem_quit.add_accelerator("activate", self.accel_group, gtk.gdk.keyval_from_name("q"), gtk.gdk.CONTROL_MASK, gtk.ACCEL_VISIBLE)
menu_file.append(menuitem_quit)
menuitem_file.set_submenu(menu_file)
menubar1.append(menuitem_file)
menuitem_edit = gtk.MenuItem(_("_Edit"))
menuitem_edit.show()
menu_edit = gtk.Menu()
menu_edit.append(gtk.TearoffMenuItem())
menuitem_undo = gtk.ImageMenuItem("gtk-undo")
menuitem_undo.show()
menuitem_undo.connect("activate", self.do_undo)
menu_edit.append(menuitem_undo)
menuitem_redo = gtk.ImageMenuItem("gtk-redo")
menuitem_redo.show()
menuitem_redo.connect("activate", self.do_redo)
menu_edit.append(menuitem_redo)
menuitem_separator2 = gtk.MenuItem()
menuitem_separator2.show()
menu_edit.append(menuitem_separator2)
menuitem_cut = gtk.ImageMenuItem("gtk-cut")
menuitem_cut.show()
menuitem_cut.connect("activate", self.do_cut)
menu_edit.append(menuitem_cut)
menuitem_copy = gtk.ImageMenuItem("gtk-copy")
menuitem_copy.show()
menuitem_copy.connect("activate", self.do_copy)
menu_edit.append(menuitem_copy)
menuitem_paste = gtk.ImageMenuItem("gtk-paste")
menuitem_paste.show()
menuitem_paste.connect("activate", self.do_paste)
menu_edit.append(menuitem_paste)
menuitem_paste_unformatted = gtk.ImageMenuItem(_("Pa_ste Unformatted"))
menuitem_paste_unformatted.show()
menuitem_paste_unformatted.connect("activate", self.do_paste_unformatted)
menuitem_paste_unformatted.add_accelerator("activate",
self.accel_group, gtk.gdk.keyval_from_name("v"), gtk.gdk.CONTROL_MASK | gtk.gdk.SHIFT_MASK, gtk.ACCEL_VISIBLE)
menu_edit.append(menuitem_paste_unformatted)
menuitem_delete = gtk.ImageMenuItem("gtk-delete")
menuitem_delete.show()
menuitem_delete.connect("activate", self.do_delete)
menu_edit.append(menuitem_delete)
menuitem_separator3 = gtk.MenuItem()
menuitem_separator3.show()
menu_edit.append(menuitem_separator3)
menuitem_select_all = gtk.ImageMenuItem("gtk-select-all")
menuitem_select_all.show()
menuitem_select_all.connect("activate", self.do_selectall)
menu_edit.append(menuitem_select_all)
menuitem_separator12 = gtk.MenuItem()
menuitem_separator12.show()
menu_edit.append(menuitem_separator12)
menuitem_find = gtk.ImageMenuItem("gtk-find")
menuitem_find.show()
menuitem_find.connect("activate", self.show_findbar)
menuitem_find.add_accelerator("activate", self.accel_group, gtk.gdk.keyval_from_name("f"), gtk.gdk.CONTROL_MASK, gtk.ACCEL_VISIBLE)
menu_edit.append(menuitem_find)
menuitem_find_and_replace = gtk.ImageMenuItem("gtk-find-and-replace")
menuitem_find_and_replace.show()
menuitem_find_and_replace.connect("activate", self.show_findbar)
menu_edit.append(menuitem_find_and_replace)
##
menu_edit.append(gtk.MenuItem())
menuitem = gtk.ImageMenuItem("gtk-preferences")
menuitem.show()
menuitem.connect("activate", lambda *i: (config.show_preference_dlg(), config.write()))
menu_edit.append(menuitem)
##
menuitem_edit.set_submenu(menu_edit)
menubar1.append(menuitem_edit)
menuitem_view = gtk.MenuItem(_("_View"))
menuitem_view.show()
menu_view = gtk.Menu()
menu_view.append(gtk.TearoffMenuItem())
## 缩放菜单
menuitem_zoom_in = gtk.ImageMenuItem(gtk.STOCK_ZOOM_IN)
menuitem_zoom_in.connect("activate", self.zoom_in)
# Ctrl++
menuitem_zoom_in.add_accelerator("activate", self.accel_group, gtk.gdk.keyval_from_name("equal"), gtk.gdk.CONTROL_MASK, gtk.ACCEL_VISIBLE)
menuitem_zoom_in.add_accelerator("activate", self.accel_group, gtk.gdk.keyval_from_name("plus"), gtk.gdk.CONTROL_MASK, gtk.ACCEL_VISIBLE)
menuitem_zoom_in.show()
menu_view.append(menuitem_zoom_in)
menuitem_zoom_out = gtk.ImageMenuItem(gtk.STOCK_ZOOM_OUT)
menuitem_zoom_out.connect("activate", self.zoom_out)
# Ctrl+-
menuitem_zoom_out.add_accelerator("activate", self.accel_group, gtk.gdk.keyval_from_name("minus"), gtk.gdk.CONTROL_MASK, gtk.ACCEL_VISIBLE)
menuitem_zoom_out.add_accelerator("activate", self.accel_group, gtk.gdk.keyval_from_name("underscore"), gtk.gdk.CONTROL_MASK, gtk.ACCEL_VISIBLE)
menuitem_zoom_out.show()
menu_view.append(menuitem_zoom_out)
menuitem_zoom_100 = gtk.ImageMenuItem(gtk.STOCK_ZOOM_100)
menuitem_zoom_100.connect("activate", self.zoom_100)
# Ctrl+0
menuitem_zoom_100.add_accelerator("activate", self.accel_group, gtk.gdk.keyval_from_name("0"), gtk.gdk.CONTROL_MASK, gtk.ACCEL_VISIBLE)
menuitem_zoom_100.show()
menu_view.append(menuitem_zoom_100)
##
menuitem_separator10 = gtk.MenuItem()
menuitem_separator10.show()
menu_view.append(menuitem_separator10)
menuitem_update_contents = gtk.ImageMenuItem(_("Update _Contents"))
menuitem_update_contents.show()
menuitem_update_contents.connect("activate", self.view_update_contents)
img = gtk.image_new_from_stock(gtk.STOCK_INDEX, gtk.ICON_SIZE_MENU)
menuitem_update_contents.set_image(img)
menu_view.append(menuitem_update_contents)
menuitem_toggle_numbered_title = gtk.ImageMenuItem(_("Toggle _Numbered Title"))
menuitem_toggle_numbered_title.show()
menuitem_toggle_numbered_title.connect("activate", self.view_toggle_autonumber)
img = gtk.image_new_from_stock(gtk.STOCK_SORT_DESCENDING, gtk.ICON_SIZE_MENU)
menuitem_toggle_numbered_title.set_image(img)
menu_view.append(menuitem_toggle_numbered_title)
menuitem_update_images = gtk.ImageMenuItem(_("Update _Images"))
menuitem_update_images.show()
menuitem_update_images.connect("activate", self.do_update_images)
img = gtk.image_new_from_icon_name('stock_insert_image', gtk.ICON_SIZE_MENU)
menuitem_update_images.set_image(img)
menu_view.append(menuitem_update_images)
menuitem_separator10 = gtk.MenuItem()
menuitem_separator10.show()
menu_view.append(menuitem_separator10)
menuitem_view_source = gtk.ImageMenuItem(_("So_urce/Visual"))
menuitem_view_source.show()
menuitem_view_source.connect("activate", self.view_sourceview)
menuitem_view_source.add_accelerator("activate", self.accel_group, gtk.gdk.keyval_from_name("u"), gtk.gdk.CONTROL_MASK, gtk.ACCEL_VISIBLE)
img = gtk.image_new_from_icon_name('stock_view-html-source', gtk.ICON_SIZE_MENU)
menuitem_view_source.set_image(img)
menu_view.append(menuitem_view_source)
menuitem_view.set_submenu(menu_view)
menubar1.append(menuitem_view)
menuitem_insert = gtk.MenuItem(_("_Insert"))
menuitem_insert.show()
menu_insert = gtk.Menu()
menu_insert.append(gtk.TearoffMenuItem())
menuitem_picture = gtk.ImageMenuItem(_("_Picture"))
menuitem_picture.show()
menuitem_picture.connect("activate", self.do_insertimage)
img = gtk.image_new_from_icon_name('stock_insert_image', gtk.ICON_SIZE_MENU)
menuitem_picture.set_image(img)
menu_insert.append(menuitem_picture)
menuitem_link = gtk.ImageMenuItem(_("_Link"))
menuitem_link.show()
menuitem_link.connect("activate", self.do_createlink)
menuitem_link.add_accelerator("activate", self.accel_group, gtk.gdk.keyval_from_name("k"), gtk.gdk.CONTROL_MASK, gtk.ACCEL_VISIBLE)
img = gtk.image_new_from_icon_name('stock_link', gtk.ICON_SIZE_MENU)
menuitem_link.set_image(img)
menu_insert.append(menuitem_link)
menuitem_horizontalrule = gtk.ImageMenuItem(_("Horizontal_Rule"))
menuitem_horizontalrule.show()
menuitem_horizontalrule.connect("activate", self.do_inserthorizontalrule)
img = gtk.image_new_from_icon_name('stock_insert-rule', gtk.ICON_SIZE_MENU)
menuitem_horizontalrule.set_image(img)
menu_insert.append(menuitem_horizontalrule)
menuitem_insert_table = gtk.ImageMenuItem(_("_Table"))
menuitem_insert_table.show()
menuitem_insert_table.connect("activate", self.do_insert_table)
img = gtk.image_new_from_icon_name('stock_insert-table', gtk.ICON_SIZE_MENU)
menuitem_insert_table.set_image(img)
menu_insert.append(menuitem_insert_table)
menuitem_insert_html = gtk.ImageMenuItem(_("_HTML"))
menuitem_insert_html.show()
menuitem_insert_html.connect("activate", self.do_insert_html)
img = gtk.image_new_from_icon_name('stock_view-html-source', gtk.ICON_SIZE_MENU)
menuitem_insert_html.set_image(img)
menu_insert.append(menuitem_insert_html)
menuitem_separator9 = gtk.MenuItem()
menuitem_separator9.show()
menu_insert.append(menuitem_separator9)
##
menuitem_latex_math_equation = gtk.ImageMenuItem(_("LaTeX _Equation"))
menuitem_latex_math_equation.show()
menuitem_latex_math_equation.connect("activate", self.do_insert_latex_math_equation)
menu_insert.append(menuitem_latex_math_equation)
menu_insert.append(gtk.MenuItem())
##
menuitem_insert_contents = gtk.ImageMenuItem(_("_Contents"))
menuitem_insert_contents.show()
menuitem_insert_contents.connect("activate", self.do_insert_contents)
img = gtk.image_new_from_stock(gtk.STOCK_INDEX, gtk.ICON_SIZE_MENU)
menuitem_insert_contents.set_image(img)
menu_insert.append(menuitem_insert_contents)
menuitem_insert.set_submenu(menu_insert)
menubar1.append(menuitem_insert)
menuitem_style = gtk.MenuItem(_("_Style"))
menuitem_style.show()
menu_style = gtk.Menu()
menu_style.append(gtk.TearoffMenuItem())
menuitem_normal = gtk.ImageMenuItem(_("_Normal"))
menuitem_normal.show()
menuitem_normal.connect("activate", self.do_formatblock_p)
menuitem_normal.add_accelerator("activate", self.accel_group, gtk.gdk.keyval_from_name("0"), gtk.gdk.CONTROL_MASK | gtk.gdk.MOD1_MASK, gtk.ACCEL_VISIBLE)
img = gtk.image_new_from_icon_name('stock_insert_section', gtk.ICON_SIZE_MENU)
menuitem_normal.set_image(img)
menu_style.append(menuitem_normal)
menuitem_separator4 = gtk.MenuItem()
menuitem_separator4.show()
menu_style.append(menuitem_separator4)
menuitem_heading_1 = gtk.ImageMenuItem(_("Heading _1"))
menuitem_heading_1.show()
menuitem_heading_1.connect("activate", self.do_formatblock_h1)
menuitem_heading_1.add_accelerator("activate", self.accel_group, gtk.gdk.keyval_from_name("1"), gtk.gdk.CONTROL_MASK | gtk.gdk.MOD1_MASK, gtk.ACCEL_VISIBLE)
img = gtk.image_new_from_icon_name('stock_insert-header', gtk.ICON_SIZE_MENU)
menuitem_heading_1.set_image(img)
menu_style.append(menuitem_heading_1)
menuitem_heading_2 = gtk.ImageMenuItem(_("Heading _2"))
menuitem_heading_2.show()
menuitem_heading_2.connect("activate", self.do_formatblock_h2)
menuitem_heading_2.add_accelerator("activate", self.accel_group, gtk.gdk.keyval_from_name("2"), gtk.gdk.CONTROL_MASK | gtk.gdk.MOD1_MASK, gtk.ACCEL_VISIBLE)
img = gtk.image_new_from_icon_name('stock_line-spacing-2', gtk.ICON_SIZE_MENU)
menuitem_heading_2.set_image(img)
menu_style.append(menuitem_heading_2)
menuitem_heading_3 = gtk.ImageMenuItem(_("Heading _3"))
menuitem_heading_3.show()
menuitem_heading_3.connect("activate", self.do_formatblock_h3)
menuitem_heading_3.add_accelerator("activate", self.accel_group, gtk.gdk.keyval_from_name("3"), gtk.gdk.CONTROL_MASK | gtk.gdk.MOD1_MASK, gtk.ACCEL_VISIBLE)
img = gtk.image_new_from_icon_name('stock_line-spacing-1', gtk.ICON_SIZE_MENU)
menuitem_heading_3.set_image(img)
menu_style.append(menuitem_heading_3)
menuitem_heading_4 = gtk.ImageMenuItem(_("Heading _4"))
menuitem_heading_4.show()
menuitem_heading_4.connect("activate", self.do_formatblock_h4)
menuitem_heading_4.add_accelerator("activate", self.accel_group, gtk.gdk.keyval_from_name("4"), gtk.gdk.CONTROL_MASK | gtk.gdk.MOD1_MASK, gtk.ACCEL_VISIBLE)
img = gtk.image_new_from_icon_name('stock_line-spacing-1.5', gtk.ICON_SIZE_MENU)
menuitem_heading_4.set_image(img)
menu_style.append(menuitem_heading_4)
menuitem_heading_5 = gtk.ImageMenuItem(_("Heading _5"))
menuitem_heading_5.show()
menuitem_heading_5.connect("activate", self.do_formatblock_h5)
menuitem_heading_5.add_accelerator("activate", self.accel_group, gtk.gdk.keyval_from_name("5"), gtk.gdk.CONTROL_MASK | gtk.gdk.MOD1_MASK, gtk.ACCEL_VISIBLE)
img = gtk.image_new_from_icon_name('stock_list_enum-off', gtk.ICON_SIZE_MENU)
menuitem_heading_5.set_image(img)
menu_style.append(menuitem_heading_5)
menuitem_heading_6 = gtk.ImageMenuItem(_("Heading _6"))
menuitem_heading_6.show()
menuitem_heading_6.connect("activate", self.do_formatblock_h6)
menuitem_heading_6.add_accelerator("activate", self.accel_group, gtk.gdk.keyval_from_name("6"), gtk.gdk.CONTROL_MASK | gtk.gdk.MOD1_MASK, gtk.ACCEL_VISIBLE)
img = gtk.image_new_from_icon_name('stock_list_enum-off', gtk.ICON_SIZE_MENU)
menuitem_heading_6.set_image(img)
menu_style.append(menuitem_heading_6)
menuitem_separator5 = gtk.MenuItem()
menuitem_separator5.show()
menu_style.append(menuitem_separator5)
menuitem_bulleted_list = gtk.ImageMenuItem(_("_Bulleted List"))
menuitem_bulleted_list.show()
menuitem_bulleted_list.connect("activate", self.do_insertunorderedlist)
img = gtk.image_new_from_icon_name('stock_list_bullet', gtk.ICON_SIZE_MENU)
menuitem_bulleted_list.set_image(img)
menu_style.append(menuitem_bulleted_list)
menuitem_numbered_list = gtk.ImageMenuItem(_("Numbered _List"))
menuitem_numbered_list.show()
menuitem_numbered_list.connect("activate", self.do_insertorderedlist)
img = gtk.image_new_from_icon_name('stock_list_enum', gtk.ICON_SIZE_MENU)
menuitem_numbered_list.set_image(img)
menu_style.append(menuitem_numbered_list)
menuitem_separator6 = gtk.MenuItem()
menuitem_separator6.show()
menu_style.append(menuitem_separator6)
div1 = gtk.ImageMenuItem(_("Di_v"))
div1.show()
div1.connect("activate", self.do_formatblock_div)
img = gtk.image_new_from_icon_name('stock_tools-hyphenation', gtk.ICON_SIZE_MENU)
div1.set_image(img)
menu_style.append(div1)
address1 = gtk.ImageMenuItem(_("A_ddress"))
address1.show()
address1.connect("activate", self.do_formatblock_address)
img = gtk.image_new_from_icon_name('stock_tools-hyphenation', gtk.ICON_SIZE_MENU)
address1.set_image(img)
menu_style.append(address1)
#menuitem_formatblock_code = gtk.ImageMenuItem(_("_Code"))
#menuitem_formatblock_code.show()
#menuitem_formatblock_code.connect("activate", self.do_formatblock_code)
#
#img = gtk.image_new_from_icon_name('stock_text-monospaced', gtk.ICON_SIZE_MENU)
#menuitem_formatblock_code.set_image(img)
#menu_style.append(menuitem_formatblock_code)
menuitem_formatblock_blockquote = gtk.ImageMenuItem(_("Block_quote"))
menuitem_formatblock_blockquote.show()
menuitem_formatblock_blockquote.connect("activate", self.do_formatblock_blockquote)
img = gtk.image_new_from_icon_name('stock_list-insert-unnumbered', gtk.ICON_SIZE_MENU)
menuitem_formatblock_blockquote.set_image(img)
menu_style.append(menuitem_formatblock_blockquote)
menuitem_formatblock_pre = gtk.ImageMenuItem(_("_Preformat"))
menuitem_formatblock_pre.show()
menuitem_formatblock_pre.connect("activate", self.do_formatblock_pre)
menuitem_formatblock_pre.add_accelerator("activate", self.accel_group, gtk.gdk.keyval_from_name("t"), gtk.gdk.CONTROL_MASK, gtk.ACCEL_VISIBLE)
img = gtk.image_new_from_icon_name('stock_text-quickedit', gtk.ICON_SIZE_MENU)
menuitem_formatblock_pre.set_image(img)
menu_style.append(menuitem_formatblock_pre)
menuitem_style.set_submenu(menu_style)
menubar1.append(menuitem_style)
menuitem_format = gtk.MenuItem(_("For_mat"))
menuitem_format.show()
menu_format = gtk.Menu()
menu_format.append(gtk.TearoffMenuItem())
menuitem_bold = gtk.ImageMenuItem("gtk-bold")
menuitem_bold.show()
menuitem_bold.connect("activate", self.on_bold)
menuitem_bold.add_accelerator("activate", self.accel_group, gtk.gdk.keyval_from_name("b"), gtk.gdk.CONTROL_MASK, gtk.ACCEL_VISIBLE)
menu_format.append(menuitem_bold)
menuitem_italic = gtk.ImageMenuItem("gtk-italic")
menuitem_italic.show()
menuitem_italic.connect("activate", self.do_italic)
menuitem_italic.add_accelerator("activate", self.accel_group, gtk.gdk.keyval_from_name("i"), gtk.gdk.CONTROL_MASK, gtk.ACCEL_VISIBLE)
menu_format.append(menuitem_italic)
menuitem_underline = gtk.ImageMenuItem("gtk-underline")
menuitem_underline.show()
menuitem_underline.connect("activate", self.do_underline)
menuitem_underline.add_accelerator("activate", self.accel_group, gtk.gdk.keyval_from_name("u"), gtk.gdk.CONTROL_MASK, gtk.ACCEL_VISIBLE)
menu_format.append(menuitem_underline)
menuitem_strikethrough = gtk.ImageMenuItem("gtk-strikethrough")
menuitem_strikethrough.show()
menuitem_strikethrough.connect("activate", self.do_strikethrough)
menu_format.append(menuitem_strikethrough)
self.separator7 = gtk.MenuItem()
self.separator7.show()
menu_format.append(self.separator7)
menuitem_font_fontname = gtk.ImageMenuItem("gtk-select-font")
menuitem_font_fontname.show()
#menuitem_font_fontname.connect("activate", self.do_font_fontname)
## 字体列表菜单 #########################################
self.fontname_menu = gtk.Menu()
self.fontname_menu.append(gtk.TearoffMenuItem())
fontnames = sorted(( familie.get_name() for familie in gtk.Label().get_pango_context().list_families() ))
## 调整字体列表顺序,将中文字体提至前列
for fontname in fontnames:
try:
fontname.decode('ascii')
pass
except:
fontnames.remove(fontname)
fontnames.insert(0, fontname)
pass
pass
for fontname in ['Serif', 'Sans', 'Sans-serif', 'Monospace', ''] + fontnames:
if fontname:
menu = gtk.MenuItem(fontname)
menu.connect("activate", self.do_font_fontname, fontname)
pass
else:
menu = gtk.MenuItem()
pass
menu.show()
self.fontname_menu.append(menu)
pass
self.fontname_menu.show()
menuitem_font_fontname.set_submenu(self.fontname_menu)
###########################################
menu_format.append(menuitem_font_fontname)
menuitem_font_size = gtk.ImageMenuItem(_("Font _Size"))
menuitem_font_size.show()
img = gtk.image_new_from_icon_name('stock_font-size', gtk.ICON_SIZE_MENU)
menuitem_font_size.set_image(img)
self.font_size1_menu = gtk.Menu()
self.font_size1_menu.append(gtk.TearoffMenuItem())
menuitem_fontsize_1 = gtk.MenuItem(_("_1"))
menuitem_fontsize_1.show()
menuitem_fontsize_1.connect("activate", self.do_fontsize_1)
self.font_size1_menu.append(menuitem_fontsize_1)
menuitem_fontsize_2 = gtk.MenuItem(_("_2"))
menuitem_fontsize_2.show()
menuitem_fontsize_2.connect("activate", self.do_fontsize_2)
self.font_size1_menu.append(menuitem_fontsize_2)
menuitem_fontsize_3 = gtk.MenuItem(_("_3"))
menuitem_fontsize_3.show()
menuitem_fontsize_3.connect("activate", self.do_fontsize_3)
self.font_size1_menu.append(menuitem_fontsize_3)
menuitem_fontsize_4 = gtk.MenuItem(_("_4"))
menuitem_fontsize_4.show()
menuitem_fontsize_4.connect("activate", self.do_fontsize_4)
self.font_size1_menu.append(menuitem_fontsize_4)
menuitem_fontsize_5 = gtk.MenuItem(_("_5"))
menuitem_fontsize_5.show()
menuitem_fontsize_5.connect("activate", self.do_fontsize_5)
self.font_size1_menu.append(menuitem_fontsize_5)
menuitem_fontsize_6 = gtk.MenuItem(_("_6"))
menuitem_fontsize_6.show()
menuitem_fontsize_6.connect("activate", self.do_fontsize_6)
self.font_size1_menu.append(menuitem_fontsize_6)
menuitem_fontsize_7 = gtk.MenuItem(_("_7"))
menuitem_fontsize_7.show()
menuitem_fontsize_7.connect("activate", self.do_fontsize_7)
self.font_size1_menu.append(menuitem_fontsize_7)
menuitem_font_size.set_submenu(self.font_size1_menu)
menu_format.append(menuitem_font_size)
menuitem_color = gtk.ImageMenuItem("gtk-select-color")
menuitem_color.show()
menuitem_color.connect("activate", self.on_color_select_forecolor)
menu_format.append(menuitem_color)
menuitem_bg_color = gtk.ImageMenuItem(_("_Highlight"))
menuitem_bg_color.show()
menuitem_bg_color.connect("activate", self.do_color_hilitecolor)
menuitem_bg_color.add_accelerator("activate", self.accel_group, gtk.gdk.keyval_from_name("h"), gtk.gdk.CONTROL_MASK, gtk.ACCEL_VISIBLE)
img = gtk.image_new_from_icon_name('stock_text_color_hilight', gtk.ICON_SIZE_MENU)
menuitem_bg_color.set_image(img)
menu_format.append(menuitem_bg_color)
menuitem_bg_color_select = gtk.ImageMenuItem(_("_HiliteColor"))
menuitem_bg_color_select.show()
menuitem_bg_color_select.connect("activate", self.on_color_select_hilitecolor)
img = gtk.image_new_from_stock(gtk.STOCK_SELECT_COLOR, gtk.ICON_SIZE_MENU)
menuitem_bg_color_select.set_image(img)
menu_format.append(menuitem_bg_color_select)
menuitem_clearformat = gtk.ImageMenuItem(_("_Clear format"))
img = gtk.image_new_from_icon_name("gtk-clear", gtk.ICON_SIZE_MENU)
menuitem_clearformat.set_image(img)
menuitem_clearformat.show()
menuitem_clearformat.connect("activate", self.do_removeformat)
menuitem_clearformat.add_accelerator("activate", self.accel_group, gtk.gdk.keyval_from_name("backslash"), gtk.gdk.CONTROL_MASK, gtk.ACCEL_VISIBLE)
menu_format.append(menuitem_clearformat)
self.separator8 = gtk.MenuItem()
self.separator8.show()
menu_format.append(self.separator8)
menuitem_justifyleft = gtk.ImageMenuItem("gtk-justify-left")
menuitem_justifyleft.show()
menuitem_justifyleft.connect("activate", self.do_justifyleft)
menuitem_justifyleft.add_accelerator("activate", self.accel_group, gtk.gdk.keyval_from_name("l"), gtk.gdk.CONTROL_MASK | gtk.gdk.SHIFT_MASK, gtk.ACCEL_VISIBLE)
menu_format.append(menuitem_justifyleft)
menuitem_justifycenter = gtk.ImageMenuItem("gtk-justify-center")
menuitem_justifycenter.show()
menuitem_justifycenter.connect("activate", self.do_justifycenter)
menuitem_justifycenter.add_accelerator("activate", self.accel_group, gtk.gdk.keyval_from_name("e"), gtk.gdk.CONTROL_MASK | gtk.gdk.SHIFT_MASK, gtk.ACCEL_VISIBLE)
menu_format.append(menuitem_justifycenter)
menuitem_justifyright = gtk.ImageMenuItem("gtk-justify-right")
menuitem_justifyright.show()
menuitem_justifyright.connect("activate", self.do_justifyright)
menuitem_justifyright.add_accelerator("activate", self.accel_group, gtk.gdk.keyval_from_name("r"), gtk.gdk.CONTROL_MASK | gtk.gdk.SHIFT_MASK, gtk.ACCEL_VISIBLE)
menu_format.append(menuitem_justifyright)
menuitem_justifyfull = gtk.ImageMenuItem("gtk-justify-fill")
menuitem_justifyfull.show()
menuitem_justifyfull.connect("activate", self.do_justifyfull)
menuitem_justifyfull.add_accelerator("activate", self.accel_group, gtk.gdk.keyval_from_name("j"), gtk.gdk.CONTROL_MASK | gtk.gdk.SHIFT_MASK, gtk.ACCEL_VISIBLE)
menu_format.append(menuitem_justifyfull)
self.separator11 = gtk.MenuItem()
self.separator11.show()
menu_format.append(self.separator11)
menuitem_increase_indent = gtk.ImageMenuItem("gtk-indent")
menuitem_increase_indent.show()
menuitem_increase_indent.connect("activate", self.do_indent)
menu_format.append(menuitem_increase_indent)
menuitem_decrease_indent = gtk.ImageMenuItem("gtk-unindent")
menuitem_decrease_indent.show()
menuitem_decrease_indent.connect("activate", self.do_outdent)
menu_format.append(menuitem_decrease_indent)
self.separator16 = gtk.MenuItem()
self.separator16.show()
menu_format.append(self.separator16)
menuitem_superscript = gtk.ImageMenuItem(_("Su_perscript"))
menuitem_superscript.show()
menuitem_superscript.connect("activate", self.do_superscript)
menuitem_superscript.add_accelerator("activate", self.accel_group, gtk.gdk.keyval_from_name("period"), gtk.gdk.CONTROL_MASK, gtk.ACCEL_VISIBLE)
img = gtk.image_new_from_icon_name('stock_superscript', gtk.ICON_SIZE_MENU)
menuitem_superscript.set_image(img)
menu_format.append(menuitem_superscript)
menuitem_subscript = gtk.ImageMenuItem(_("Subs_cript"))
menuitem_subscript.show()
menuitem_subscript.connect("activate", self.do_subscript)
menuitem_subscript.add_accelerator("activate", self.accel_group, gtk.gdk.keyval_from_name("comma"), gtk.gdk.CONTROL_MASK, gtk.ACCEL_VISIBLE)
img = gtk.image_new_from_icon_name('stock_subscript', gtk.ICON_SIZE_MENU)
menuitem_subscript.set_image(img)
menu_format.append(menuitem_subscript)
menuitem_format.set_submenu(menu_format)
menubar1.append(menuitem_format)
##
menuitem_tools = gtk.MenuItem(_("_Tools"))
menuitem_tools.show()
menu_tools = gtk.Menu()
menu_tools.append(gtk.TearoffMenuItem())
menuitem_word_count = gtk.ImageMenuItem(_("_Word Count"))
img = gtk.image_new_from_icon_name('gtk-index', gtk.ICON_SIZE_MENU)
menuitem_word_count.set_image(img)
menuitem_word_count.show()
menuitem_word_count.connect("activate", self.on_word_counts)
menuitem_word_count.add_accelerator("activate", self.accel_group, gtk.gdk.keyval_from_name("c"), gtk.gdk.CONTROL_MASK | gtk.gdk.SHIFT_MASK, gtk.ACCEL_VISIBLE)
menu_tools.append(menuitem_word_count)
menuitem_tools.set_submenu(menu_tools)
menubar1.append(menuitem_tools)
##
menuitem_help = gtk.MenuItem(_("_Help"))
menuitem_help.show()
menu_help = gtk.Menu()
menu_help.append(gtk.TearoffMenuItem())
menuitem_about = gtk.ImageMenuItem("gtk-about")
menuitem_about.show()
menuitem_about.connect("activate", self.on_about)
menu_help.append(menuitem_about)
menuitem_help.set_submenu(menu_help)
menubar1.append(menuitem_help)
menubar1.show_all()
self.vbox1.pack_start(menubar1, False, False, 0)
## 工具栏
self.toolbar1 = gtk.Toolbar()
self.toolbar1.show()
toolbutton_new = gtk.ToolButton()
toolbutton_new.set_tooltip_text(_("New"))
toolbutton_new.show()
toolbutton_new.set_stock_id(gtk.STOCK_NEW)
toolbutton_new.connect("clicked", self.on_new)
self.toolbar1.add(toolbutton_new)
toolbutton_open = gtk.MenuToolButton(gtk.STOCK_OPEN)
toolbutton_open.set_tooltip_text(_("Open"))
toolbutton_open.show()
#toolbutton_open.set_stock_id(gtk.STOCK_OPEN)
toolbutton_open.connect("clicked", self.on_open)
toolbutton_open.set_menu(menu_recent)
self.toolbar1.add(toolbutton_open)
toolbutton_save = gtk.ToolButton()
toolbutton_save.set_tooltip_text(_("Save"))
toolbutton_save.show()
toolbutton_save.set_stock_id(gtk.STOCK_SAVE)
toolbutton_save.connect("clicked", self.on_save)
self.toolbar1.add(toolbutton_save)
separatortoolitem1 = gtk.SeparatorToolItem()
separatortoolitem1.show()
self.toolbar1.add(separatortoolitem1)
toolbutton_undo = gtk.ToolButton()
toolbutton_undo.set_tooltip_text(_("Undo"))
toolbutton_undo.show()
toolbutton_undo.set_stock_id(gtk.STOCK_UNDO)
toolbutton_undo.connect("clicked", self.do_undo)
self.toolbar1.add(toolbutton_undo)
toolbutton_redo = gtk.ToolButton()
toolbutton_redo.set_tooltip_text(_("Redo"))
toolbutton_redo.show()
toolbutton_redo.set_stock_id(gtk.STOCK_REDO)
toolbutton_redo.connect("clicked", self.do_redo)
self.toolbar1.add(toolbutton_redo)
separatortoolitem3 = gtk.SeparatorToolItem()
separatortoolitem3.show()
self.toolbar1.add(separatortoolitem3)
toolbutton_cut = gtk.ToolButton()
toolbutton_cut.set_tooltip_text(_("Cut"))
toolbutton_cut.show()
toolbutton_cut.set_stock_id(gtk.STOCK_CUT)
toolbutton_cut.connect("clicked", self.do_cut)
self.toolbar1.add(toolbutton_cut)
toolbutton_copy = gtk.ToolButton()
toolbutton_copy.set_tooltip_text(_("Copy"))
toolbutton_copy.show()
toolbutton_copy.set_stock_id(gtk.STOCK_COPY)
toolbutton_copy.connect("clicked", self.do_copy)
self.toolbar1.add(toolbutton_copy)
toolbutton_paste = gtk.ToolButton()
toolbutton_paste.set_tooltip_text(_("Paste"))
toolbutton_paste.show()
toolbutton_paste.set_stock_id(gtk.STOCK_PASTE)
toolbutton_paste.connect("clicked", self.do_paste)
self.toolbar1.add(toolbutton_paste)
separatortoolitem2 = gtk.SeparatorToolItem()
separatortoolitem2.show()
self.toolbar1.add(separatortoolitem2)
## p, h1, h2 样式
label1 = gtk.Label("")
label1.set_markup("<b>P</b>")
button1 = gtk.ToolButton(label1, _("Paragraph"))
button1.set_tooltip_text(_("Paragraph"))
button1.connect("clicked", self.do_formatblock_p)
button1.show()
self.toolbar1.add( button1)
label1 = gtk.Label("")
label1.set_markup("<big><big><b>H1</b></big></big>")
button1 = gtk.ToolButton(label1, _("Heading 1"))
button1.set_tooltip_text(_("Heading 1"))
button1.connect("clicked", self.do_formatblock_h1)
button1.show()
self.toolbar1.add( button1)
label1 = gtk.Label("")
label1.set_markup("<big><b>H2</b></big>")
button1 = gtk.ToolButton(label1, _("Heading 2"))
button1.set_tooltip_text(_("Heading 2"))
button1.connect("clicked", self.do_formatblock_h2)
button1.show()
self.toolbar1.add( button1)
## h3 样式
label1 = gtk.Label("")
label1.set_markup("<b>H3</b>")
button1 = gtk.MenuToolButton(label1, _("Heading 3"))
button1.set_tooltip_text(_("Heading 3"))
button1.set_arrow_tooltip_markup(_("Style"))
button1.connect("clicked", self.do_formatblock_h3)
button1.show()
self.toolbar1.add( button1)
menu_style = gtk.Menu()
menuitem_heading_4 = gtk.ImageMenuItem(_("Heading _4"))
menuitem_heading_4.show()
menuitem_heading_4.connect("activate", self.do_formatblock_h4)
img = gtk.image_new_from_icon_name('stock_line-spacing-1.5', gtk.ICON_SIZE_MENU)
menuitem_heading_4.set_image(img)
menu_style.append(menuitem_heading_4)
menuitem_heading_5 = gtk.ImageMenuItem(_("Heading _5"))
menuitem_heading_5.show()
menuitem_heading_5.connect("activate", self.do_formatblock_h5)
img = gtk.image_new_from_icon_name('stock_list_enum-off', gtk.ICON_SIZE_MENU)
menuitem_heading_5.set_image(img)
menu_style.append(menuitem_heading_5)
menuitem_heading_6 = gtk.ImageMenuItem(_("Heading _6"))
menuitem_heading_6.show()
menuitem_heading_6.connect("activate", self.do_formatblock_h6)
img = gtk.image_new_from_icon_name('stock_list_enum-off', gtk.ICON_SIZE_MENU)
menuitem_heading_6.set_image(img)
menu_style.append(menuitem_heading_6)
menuitem_separator5 = gtk.MenuItem()
menuitem_separator5.show()
menu_style.append(menuitem_separator5)
menuitem_bulleted_list = gtk.ImageMenuItem(_("_Bulleted List"))
menuitem_bulleted_list.show()
menuitem_bulleted_list.connect("activate", self.do_insertunorderedlist)
img = gtk.image_new_from_icon_name('stock_list_bullet', gtk.ICON_SIZE_MENU)
menuitem_bulleted_list.set_image(img)
menu_style.append(menuitem_bulleted_list)
menuitem_numbered_list = gtk.ImageMenuItem(_("Numbered _List"))
menuitem_numbered_list.show()
menuitem_numbered_list.connect("activate", self.do_insertorderedlist)
img = gtk.image_new_from_icon_name('stock_list_enum', gtk.ICON_SIZE_MENU)
menuitem_numbered_list.set_image(img)
menu_style.append(menuitem_numbered_list)
menuitem_separator6 = gtk.MenuItem()
menuitem_separator6.show()
menu_style.append(menuitem_separator6)
div1 = gtk.ImageMenuItem(_("Di_v"))
div1.show()
div1.connect("activate", self.do_formatblock_div)
img = gtk.image_new_from_icon_name('stock_tools-hyphenation', gtk.ICON_SIZE_MENU)
div1.set_image(img)
menu_style.append(div1)
address1 = gtk.ImageMenuItem(_("A_ddress"))
address1.show()
address1.connect("activate", self.do_formatblock_address)
img = gtk.image_new_from_icon_name('stock_tools-hyphenation', gtk.ICON_SIZE_MENU)
address1.set_image(img)
menu_style.append(address1)
#menuitem_formatblock_code = gtk.ImageMenuItem(_("_Code"))
#menuitem_formatblock_code.show()
#menuitem_formatblock_code.connect("activate", self.do_formatblock_code)
#
#img = gtk.image_new_from_icon_name('stock_text-monospaced', gtk.ICON_SIZE_MENU)
#menuitem_formatblock_code.set_image(img)
#menu_style.append(menuitem_formatblock_code)
menuitem_formatblock_blockquote = gtk.ImageMenuItem(_("Block_quote"))
menuitem_formatblock_blockquote.show()
menuitem_formatblock_blockquote.connect("activate", self.do_formatblock_blockquote)
img = gtk.image_new_from_icon_name('stock_list-insert-unnumbered', gtk.ICON_SIZE_MENU)
menuitem_formatblock_blockquote.set_image(img)
menu_style.append(menuitem_formatblock_blockquote)
menuitem_formatblock_pre = gtk.ImageMenuItem(_("_Preformat"))
menuitem_formatblock_pre.show()
menuitem_formatblock_pre.connect("activate", self.do_formatblock_pre)
img = gtk.image_new_from_icon_name('stock_text-quickedit', gtk.ICON_SIZE_MENU)
menuitem_formatblock_pre.set_image(img)
menu_style.append(menuitem_formatblock_pre)
button1.set_menu(menu_style)
########################
## 粗体按钮菜单
menu_format = gtk.Menu()
menu_format.append(gtk.TearoffMenuItem())
menuitem_italic = gtk.ImageMenuItem("gtk-italic")
menuitem_italic.show()
menuitem_italic.connect("activate", self.do_italic)
menu_format.append(menuitem_italic)
menuitem_underline = gtk.ImageMenuItem("gtk-underline")
menuitem_underline.show()
menuitem_underline.connect("activate", self.do_underline)
menu_format.append(menuitem_underline)
menuitem_strikethrough = gtk.ImageMenuItem("gtk-strikethrough")
menuitem_strikethrough.show()
menuitem_strikethrough.connect("activate", self.do_strikethrough)
menu_format.append(menuitem_strikethrough)
separatortoolitem4 = gtk.SeparatorToolItem()
separatortoolitem4.show()
self.toolbar1.add(separatortoolitem4)
toolbutton_bold = gtk.MenuToolButton(gtk.STOCK_BOLD)
toolbutton_bold.set_label(_("Bold"))
toolbutton_bold.set_tooltip_text(_("Bold"))
toolbutton_bold.show()
toolbutton_bold.set_stock_id(gtk.STOCK_BOLD)
toolbutton_bold.connect("clicked", self.on_bold)
toolbutton_bold.set_menu(menu_format)
self.toolbar1.add(toolbutton_bold)
## 高亮颜色
toolbutton_hilitecolor = gtk.MenuToolButton("")
toolbutton_hilitecolor.set_icon_name("stock_text_color_hilight")
toolbutton_hilitecolor.set_label(_("Highlight"))
toolbutton_hilitecolor.set_tooltip_text(_("Highlight"))
toolbutton_hilitecolor.set_arrow_tooltip_markup(_("Select hilitecolor"))
toolbutton_hilitecolor.set_menu(gtk.Menu())
toolbutton_hilitecolor.show()
toolbutton_hilitecolor.connect("clicked", self.do_color_hilitecolor)
### 处理 ToolButton 箭头
on_color_select_hilitecolor = self.on_color_select_hilitecolor
ib, mb = toolbutton_hilitecolor.get_children()[0].get_children()
mb.connect("clicked", self.on_color_select_hilitecolor)
self.toolbar1.add(toolbutton_hilitecolor)
## 清除格式
button1 = gtk.ToolButton()
button1.set_icon_name("gtk-clear")
button1.set_label(_("Clear format"))
button1.set_tooltip_text(_("Clear format"))
button1.show()
button1.connect("clicked", self.do_removeformat)
self.toolbar1.add(button1)
### 字体菜单按钮
#toolbutton_font = gtk.MenuToolButton("gtk-select-font")
#toolbutton_font.set_label(_("Font"))
#toolbutton_font.set_tooltip_text(_("Font"))
#toolbutton_font.show()
#toolbutton_font.set_menu(self.fontname_menu)
### 处理 gtk.MenuToolButton 按钮
#m = toolbutton_font
#ib, mb = m.child.children()
#mb.remove(mb.child)
#ib.child.reparent(mb)
#m.child.remove(ib)
#self.toolbar1.add(toolbutton_font)
##
###############
self.toolbar = gtk.HandleBox()
self.toolbar.add(self.toolbar1)
self.toolbar.show_all()
self.vbox1.pack_start(self.toolbar, False, False, 0)
## 编辑区
#self.editport = gtk.Viewport()
#self.editport.show()
#self.editport.set_shadow_type(gtk.SHADOW_NONE)
#
#self.vbox1.pack_start(self.editport)
##
self.notebox = gtk.Notebook()
self.notebox.set_tab_pos(2) # 0, 1, 2, 3 -> left, top, right, bottom
self.notebox.set_border_width(0)
#self.notebox.popup_enable()
self.notebox.set_property('homogeneous', 0)
self.notebox.unset_flags(gtk.CAN_FOCUS)
self.notebox.set_scrollable(True)
self.notebox.connect("switch-page", self.on_mdi_switch_page)
self.notebox.connect("button-press-event", self.on_mdi_menu) # 用 "button-release-event" 会不能中止事件向上传递
self.notebox.show()
editbox = self.new_edit(self.editfile)
editbox.show()
self.notebox_insert_page(editbox)
self.notebox.set_tab_reorderable(editbox, True)
self.notebox.show_all()
self.vbox1.pack_start(self.notebox)
## 搜索栏
self.findbar = gtk.HandleBox()
self.findbar.set_shadow_type(gtk.SHADOW_OUT)
self.findbox = gtk.HBox(False, 0)
self.findbox.show()
button_hidefindbar = gtk.Button()
button_hidefindbar.set_tooltip_text(_("Close Findbar"))
button_hidefindbar.show()
button_hidefindbar.set_relief(gtk.RELIEF_NONE)
button_hidefindbar.connect("clicked", self.hide_findbar)
image113 = gtk.Image()
image113.set_from_stock(gtk.STOCK_CLOSE, 1)
image113.show()
button_hidefindbar.add(image113)
self.findbox.pack_start(button_hidefindbar, False, False, 0)
self.entry_searchtext = gtk.Entry()
self.entry_searchtext.show()
self.entry_searchtext.connect("changed", self.do_highlight_text_matches)
#self.entry_searchtext.set_property("primary-icon-stock", "gtk-go-back")
#self.entry_searchtext.set_property("primary-icon-tooltip-text", _("Find Previous"))
#self.entry_searchtext.set_property("secondary-icon-stock", "gtk-find")
#self.entry_searchtext.set_property("secondary-icon-tooltip-text", _("Find Next"))
self.entry_searchtext.set_property("primary-icon-stock", "gtk-find")
self.entry_searchtext.set_property("primary-icon-tooltip-text", _("Find Next"))
self.entry_searchtext.connect("icon-release", self.do_find_text)
self.entry_searchtext.set_tooltip_text(_("Search text"))
#self.entry_searchtext.set_flags(gtk.CAN_DEFAULT)
#self.entry_searchtext.grab_focus()
self.findbox.pack_start(self.entry_searchtext)
button1 = gtk.Button()
button1.set_tooltip_text(_("Find Previous"))
button1.show()
button1.set_relief(gtk.RELIEF_NONE)
button1.connect("clicked", self.do_find_text_backward)
image1 = gtk.Image()
image1.set_from_stock(gtk.STOCK_GO_BACK, 4)
image1.show()
button1.add(image1)
self.findbox.pack_start(button1, False, False, 0)
button_search_text = gtk.Button(_("Find"))
img = gtk.Image()
img.set_from_stock("gtk-find", 4)
img.show()
button_search_text.set_image(img)
button_search_text.set_tooltip_text(_("Find Next"))
button_search_text.show()
button_search_text.set_relief(gtk.RELIEF_NONE)
button_search_text.connect("clicked", self.do_find_text)
button_search_text.add_accelerator("clicked", self.accel_group, gtk.gdk.keyval_from_name("F3"), 0, gtk.ACCEL_VISIBLE)
self.findbox.pack_start(button_search_text, False, False, 0)
self.findbox.pack_start(gtk.VSeparator(), False, False, 3)
self.entry_replace_text = gtk.Entry()
self.entry_replace_text.show()
self.entry_replace_text.set_tooltip_text(_("Replace text"))
self.entry_replace_text.set_property("primary-icon-stock", "gtk-find-and-replace")
self.entry_replace_text.set_property("primary-icon-tooltip-text", _("Replace"))
self.findbox.pack_start(self.entry_replace_text)
button_replace_text = gtk.Button()
button_replace_text.set_tooltip_text(_("Replace"))
button_replace_text.show()
button_replace_text.set_relief(gtk.RELIEF_NONE)
button_replace_text.connect("clicked", self.do_replace_text)
alignment1 = gtk.Alignment(0.5, 0.5, 0, 0)
alignment1.show()
hbox2 = gtk.HBox(False, 0)
hbox2.show()
hbox2.set_spacing(2)
image136 = gtk.Image()
image136.set_from_stock(gtk.STOCK_FIND_AND_REPLACE, 4)
image136.show()
hbox2.pack_start(image136, False, False, 0)
label1 = gtk.Label(_("Replace"))
label1.show()
hbox2.pack_start(label1, False, False, 0)
alignment1.add(hbox2)
button_replace_text.add(alignment1)
self.findbox.pack_start(button_replace_text, False, False, 0)
#self.findbox.pack_start(gtk.VSeparator(), False, False, 0)
button2 = gtk.Button()
button2.set_tooltip_text(_("Replace All"))
button2.set_label(_("ReplaceAll"))
button2.show()
button2.set_relief(gtk.RELIEF_NONE)
img = gtk.Image()
img.set_from_stock("gtk-convert", 4)
img.show()
button2.set_image(img)
button2.connect("clicked", self.do_replace_text_all)
self.findbox.pack_start(button2, False, False, 0)
self.findbar.add(self.findbox)
self.vbox1.pack_start(self.findbar, False, False, 0)
#self.edit.contextmenu.append(menuitem_style)
#self.edit.connect("popup-menu", self._populate_popup)
if create:
self.window.add(self.vbox1)
pass
pass
def mdi_get_tab_menu(self, editbox=None, windowslist=0):
    """Build and return the tab context menu: New, Close, then one jump entry per open tab."""
    menu = gtk.Menu()
    item_new = gtk.ImageMenuItem("gtk-new")
    item_new.show()
    item_new.connect("activate", self.on_new)
    menu.append(item_new)
    item_close = gtk.ImageMenuItem("gtk-close")
    item_close.show()
    item_close.connect("activate", self.close_tab, editbox)
    menu.append(item_close)
    menu.append(gtk.MenuItem())
    # One entry per open tab, selecting that tab on activation.
    for box in self.notebox.get_children():
        entry = gtk.ImageMenuItem(box.edit.title)
        entry.set_image(gtk.image_new_from_stock("gtk-dnd", gtk.ICON_SIZE_MENU))
        entry.connect("activate", self.notebox_set_current, box)
        entry.show()
        menu.append(entry)
    if windowslist and config.single_instance_mode:
        pass  # placeholder: cross-window list in single-instance mode (not implemented)
    menu.show_all()
    return menu
def on_accel_connect_group(self, accel_group, acceleratable, keyval, modifier):
    """Switch tabs via Alt-1..Alt-9 accelerators (Alt-1 selects the first tab).

    Digit keyvals are contiguous, so the page index is the keyval's offset
    from the keyval of '1' (the original hard-coded 49 — the ASCII/GDK
    value of '1').
    """
    page = keyval - gtk.gdk.keyval_from_name('1')
    self.notebox.set_current_page(page)
    return
def on_mdi_menu(self, widget, event, editbox=None, *args):
    """Mouse handler for tabs/notebook.

    Right-click pops up the tab menu; middle-click or double-click closes
    the clicked tab, or opens a new document when not over a tab.
    Returns True when the event was consumed.
    """
    if event.button == 3:
        menu = self.mdi_get_tab_menu(editbox)
        menu.popup(None, None, None, event.button, event.time)
        return True
    middle_click = event.type.value_name == "GDK_BUTTON_PRESS" and event.button == 2
    double_click = event.type.value_name == "GDK_2BUTTON_PRESS" and event.button == 1
    if middle_click or double_click:
        # On a tab: close it.  On empty notebook space: new document.
        if editbox:
            self.close_tab(editbox)
        else:
            self.on_new()
        return True
    return False
def on_mdi_switch_page(self, notebook, page, page_num, *user_param):
    """Track the newly selected tab.

    Toggles tab-bar visibility (hidden for a single page), rebinds
    self.editbox/edit/linkview to the active page, refreshes the window
    title and re-applies search highlighting.
    """
    self.notebox.unset_flags(gtk.CAN_FOCUS)
    # Show the tab bar only when more than one page is open.
    self.notebox.set_show_tabs(self.notebox.get_n_pages() > 1)
    ## Rebind the active editor and its contents pane.
    editbox = self.notebox.get_nth_page(page_num)
    self.editbox = editbox
    self.edit = editbox.edit
    self.linkview = editbox.linkview
    self.window.set_title(self.edit.title + ' - ' + Title)
    try:
        # Best-effort: the find bar may not be fully initialized yet.
        # Narrowed from a bare except so KeyboardInterrupt/SystemExit
        # are no longer swallowed.
        self.do_highlight_text_matches()
    except Exception:
        pass
def on_over_link(self, edit, alt, href):
    """Show the hovered link target in the window tooltip.

    Links into the same document are shortened to their '#fragment' part.
    (Removed a dead local: `urllib2.unquote(uri)` was computed but never
    used.)
    """
    href = href or ""
    uri = edit.get_main_frame().get_uri()
    # Same-document anchor: display only the fragment.
    if "#" in href and uri.split('#', 1)[0] == href.split('#', 1)[0]:
        href = "#" + href.split('#', 1)[1]
    self.window.set_tooltip_text(href)
def notebox_set_current(self, widget, editbox=None):
    """Focus *editbox*'s tab and raise the window.

    When called outside a signal handler, the first argument doubles as
    the target editbox.
    """
    target = editbox if editbox else widget
    self.notebox.set_current_page(self.notebox.page_num(target))
    self.window.present()
    return
def notebox_set_label_text(self, editbox, text):
    """Set *text* as the tab and menu label for *editbox*.

    The label is wrapped in an invisible EventBox so button presses on
    the tab reach on_mdi_menu.
    """
    self.notebox.set_menu_label_text(editbox, text)
    label = gtk.Label(text)
    label.show()
    eventbox = gtk.EventBox()
    eventbox.set_visible_window(0)
    eventbox.connect("button-press-event", self.on_mdi_menu, editbox)
    eventbox.add(label)
    self.notebox.set_tab_label(editbox, eventbox)
def notebox_insert_page(self, editbox):
    """Insert *editbox* as a new tab right after the current one and focus it."""
    position = self.notebox.get_current_page() + 1
    page = self.notebox.insert_page(editbox, None, position)
    self.notebox_set_label_text(editbox, editbox.edit.title)
    self.notebox.set_tab_reorderable(editbox, True)
    self.notebox.set_current_page(page)
    return
def new_edit(self, editfile):
    """Build a new editor tab for *editfile* ("" means a new untitled document).

    Returns a gtk.VBox carrying three extra attributes used elsewhere:
    .edit (the WebKit editor), .linkview (the contents-pane view) and
    .navigation_pane (the left-hand pane VBox).
    """
    global new_num
    editbox = gtk.VBox()
    editbox.show()
    separator = gtk.HSeparator()
    separator.show()
    editbox.pack_start(separator, False, False)
    hpaned = gtk.HPaned()
    hpaned.set_border_width(0)
    hpaned.set_position(170)
    hpaned.show()
    editbox.pack_start(hpaned, True, True)
    ## Navigation (contents) pane
    vbox1 = gtk.VBox()
    label1 = gtk.Label(_("Navigation Pane"))
    label1.set_alignment(0, 0)
    vbox1.pack_start(label1, False, False)
    scrolledwindow1 = gtk.ScrolledWindow()
    scrolledwindow1.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
    scrolledwindow1.show()
    scrolledwindow1.set_shadow_type(gtk.SHADOW_IN)
    import webkitlinkview
    linkview = webkitlinkview.LinkTextView()
    linkview.connect('url-clicked', self.on_title_clicked)
    linkview.connect('populate-popup', self._linkview_populate_popup)
    linkview.show()
    scrolledwindow1.add(linkview)
    editbox.linkview = linkview
    vbox1.pack_start(scrolledwindow1)
    vbox1.show_all()
    hpaned.pack1(vbox1, False, True)
    editbox.navigation_pane = vbox1
    ## Edit area
    import webkitedit
    scrolledwindow2 = gtk.ScrolledWindow()
    scrolledwindow2.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
    scrolledwindow2.show()
    scrolledwindow2.set_shadow_type(gtk.SHADOW_IN)
    edit = webkitedit.WebKitEdit(editfile)
    edit.show()
    edit.connect("load-finished", self.on_load_finished)
    edit.connect("hovering-over-link", self.on_over_link)
    edit.set_flags(gtk.CAN_FOCUS)
    edit.set_flags(gtk.CAN_DEFAULT)
    self.window.present()
    scrolledwindow2.add(edit)
    editbox.edit = edit
    hpaned.pack2(scrolledwindow2, True, True)
    if editfile:
        edit.lastDir = os.path.dirname(editfile)
        edit.title = os.path.basename(editfile)
        self.add_recent(editfile)
        pass
    else:
        # Untitled documents get a per-process running number when tabs or
        # single-instance mode can show several of them at once.
        if config.mdi_mode or config.single_instance_mode:
            edit.title = _("[New Document] %s") % new_num
            new_num += 1
            pass
        else:
            edit.title = _("[New Document]")
            pass
    editbox.connect("button-press-event", lambda *i: True)  ## stop mouse button events from propagating upward
    gobject.idle_add(proc_webkit_color, edit, linkview)
    return editbox
def _populate_popup(self, view, menu):
    """Editor context-menu hook; currently a no-op (default menu is kept as-is)."""
    pass
def zoom(self, level):
    """Set the editor zoom factor to *level* (1.0 = 100%)."""
    self.edit.set_zoom_level(level)
def zoom_100(self, *args):
    """Reset the editor zoom to 100%."""
    self.edit.set_zoom_level(1.0)
def zoom_in(self, *args):
    """Zoom the editor in by one step."""
    self.edit.zoom_in()
def zoom_out(self, *args):
    """Zoom the editor out by one step."""
    self.edit.zoom_out()
def _linkview_populate_popup(self, view, menu):
    """Rebuild the navigation pane's context menu.

    WebKit's default entries are removed and replaced with: jump-to /
    select-section items (when the pointer is over a section link),
    contents refresh, heading auto-numbering toggle, and zoom controls
    for the pane itself.
    """
    # A 'gtk-open' stock item in the default menu means the pointer is over
    # a link; its target is stashed in the frame title by the link view.
    href = ""
    if menu_find_with_stock(menu, 'gtk-open') > -1:
        href = view.get_main_frame().get_title()
        pass
    ## Drop the stock menu entries
    #menu.destroy()
    #menu = gtk.Menu()
    for i in menu.get_children():
        menu.remove(i)
        pass
    ## Jump-to / select-section entries
    if href:
        menuitem_jump_to = gtk.ImageMenuItem("gtk-jump-to")
        menuitem_jump_to.show()
        menuitem_jump_to.connect("activate", self.edit.go_anchor, href)
        menu.append(menuitem_jump_to)
        menuitem_select = gtk.ImageMenuItem(_("_Select this"))
        menuitem_select.set_image(gtk.image_new_from_stock(gtk.STOCK_SELECT_ALL, gtk.ICON_SIZE_MENU))
        menuitem_select.show()
        menuitem_select.set_tooltip_markup(_("您也可以直接<b>双击</b>以选择该章节文字"))
        menuitem_select.connect("activate", self.edit.select_section, href)
        menu.append(menuitem_select)
        menu.append(gtk.MenuItem())
        pass
    ## Refresh table of contents
    menuitem_update_contents = gtk.ImageMenuItem(_("Update _Contents"))
    menuitem_update_contents.show()
    menuitem_update_contents.connect("activate", self.view_update_contents)
    img = gtk.image_new_from_stock(gtk.STOCK_INDEX, gtk.ICON_SIZE_MENU)
    menuitem_update_contents.set_image(img)
    menu.append(menuitem_update_contents)
    menuitem_toggle_numbered_title = gtk.ImageMenuItem(_("Toggle _Numbered Title"))
    menuitem_toggle_numbered_title.show()
    menuitem_toggle_numbered_title.connect("activate", self.view_toggle_autonumber)
    img = gtk.image_new_from_stock(gtk.STOCK_SORT_DESCENDING, gtk.ICON_SIZE_MENU)
    menuitem_toggle_numbered_title.set_image(img)
    menu.append(menuitem_toggle_numbered_title)
    ## Zoom entries for the navigation pane
    linkview = self.linkview
    menuitem_separator10 = gtk.MenuItem()
    menuitem_separator10.show()
    menu.append(menuitem_separator10)
    menuitem_zoom_in = gtk.ImageMenuItem(gtk.STOCK_ZOOM_IN)
    menuitem_zoom_in.connect("activate", lambda *i: linkview.zoom_in())
    menuitem_zoom_in.show()
    menu.append(menuitem_zoom_in)
    menuitem_zoom_out = gtk.ImageMenuItem(gtk.STOCK_ZOOM_OUT)
    menuitem_zoom_out.connect("activate", lambda *i: linkview.zoom_out())
    menuitem_zoom_out.show()
    menu.append(menuitem_zoom_out)
    menuitem_zoom_100 = gtk.ImageMenuItem(gtk.STOCK_ZOOM_100)
    menuitem_zoom_100.connect("activate", lambda *i: linkview.set_zoom_level(1.0))
    menuitem_zoom_100.show()
    menu.append(menuitem_zoom_100)
    menu.show_all()
    pass
def on_title_clicked(self, widget, href, type):
    """Navigation-pane link handler.

    A '+'-prefixed href selects the whole section; anything else jumps to
    the anchor after the '#'.
    """
    fragment = href.split('#', 1)[1]
    if href.startswith('+'):
        self.edit.select_section(fragment)
        return True
    self.edit.go_anchor(fragment)
def on_load_finished(self, edit, *args):
    """After a page finishes loading: rebuild the contents pane.

    A freshly created empty document is marked saved so closing it does
    not prompt.
    """
    self.view_update_contents()
    if edit._html == "":
        edit.set_saved()
def close_tab(self, widget=None, editbox=None, *args):
    """Close one notebook tab, prompting to save unsaved changes first.

    Returns True when closing was vetoed (Cancel, or save failed) or when
    other tabs remain; destroys the whole window — and quits GTK if it was
    the last window — once the final tab is gone.
    """
    notebox = self.notebox
    # Signal handlers may pass the editbox itself as the first argument.
    if widget and 'edit' in widget.__dict__:
        editbox = widget
        pass
    # Default to the currently visible tab.
    if not editbox:
        n = notebox.get_current_page()
        editbox = notebox.get_nth_page(n)
        pass
    edit = editbox.edit
    linkview = editbox.linkview
    self.window.show()
    if not edit.is_saved():
        ## r: 1, -1, 0 => yes, no, cancel
        r = gtkdialogs.savechanges(_("%s Save Changes?") % edit.title)
        if r == 1:
            filename = self.on_save()
            if not filename:
                # Save was cancelled/failed: keep the tab open.
                return True
            pass
        elif r == 0:
            # User cancelled: keep the tab open.
            return True
        pass
    # Close the tab and release its widgets.
    notebox.remove(editbox)
    edit.destroy()
    linkview.destroy()
    editbox.destroy()
    # With tabs remaining the window stays up.
    if self.notebox.get_n_pages():
        return True
    Windows.remove(self)
    gtk.gdk.threads_leave()
    self.window.destroy()
    if not Windows:
        gtk.main_quit()
    return
def on_close(self, *args):
    """Close this window, first closing every tab.

    Each close_tab() may veto (unsaved-changes prompt); if any tab is
    left, the window stays open and True is returned to stop the default
    handler.
    """
    #@TODO: unsaved-changes prompt on exit
    for _i in range(self.notebox.get_n_pages()):
        self.close_tab()
    if self.notebox.get_n_pages():
        return True
    try:
        Windows.remove(self)
    except ValueError:
        # Already removed (close_tab does this when it closes the last
        # tab itself).  Narrowed from a bare except.
        pass
    gtk.gdk.threads_leave()
    self.window.destroy()
    if not Windows:
        gtk.main_quit()
def on_quit(self, *args):
    """Close every window (newest first) and quit the GTK main loop.

    Iterates over a snapshot: on_close() removes entries from the global
    Windows list, and mutating a list while iterating it (even through
    reversed()) skips elements.
    """
    for window in list(reversed(Windows)):
        window.on_close()
    gtk.main_quit()
def on_new(self, *args):
    """Create a new, empty document (tab or window depending on mode)."""
    return self.open("")
def on_new_window(self, *args):
    """Open another top-level window.

    In-process in single-instance mode, otherwise by spawning a new
    gwrite process.
    """
    if config.single_instance_mode:
        return MainWindow()
    return os.spawnvp(os.P_NOWAIT, sys.argv[0], ['gwrite'])
def add_recent(self, filename):
    """Register *filename* with the GTK recent-files manager under the gwrite group."""
    metadata = {'mime_type': 'text/html', 'app_name': 'gwrite',
                'app_exec': 'gwrite', 'group': 'gwrite'}
    self.recent.add_full('file://' + filename, metadata)
def open(self, filename=""):
    """Open *filename*, or a new empty document when "".

    MDI mode: focus the existing tab for the file, or add a new tab.
    Otherwise: reuse this window when it holds a pristine empty document;
    else open a new window (single-instance mode) or spawn a new gwrite
    process.
    """
    self.window.present()
    # MDI mode: tabs within this window
    if config.mdi_mode:
        if filename:
            # If a tab already shows this file, just focus it.
            for editbox in self.notebox.get_children():
                if editbox.edit.editfile == filename:
                    self.notebox.set_current_page(self.notebox.page_num(editbox))
                    return
                pass
            pass
        editbox = self.new_edit(filename)
        self.notebox_insert_page(editbox)
        return
    # Reuse this window when the current document is empty and unmodified.
    if filename and self.edit.editfile == '' and self.edit.is_saved():
        self.window.set_title(os.path.basename(filename) + ' - ' + Title)
        self.edit.lastDir = os.path.dirname(filename)
        self.edit.editfile = filename
        self.edit._html = ""
        if filename and os.access(filename, os.R_OK):
            self.edit.open(filename)
            self.add_recent(filename)
            pass
        pass
    elif config.single_instance_mode:
        MainWindow(editfile = filename)
        pass
    else:
        # Separate-process mode: hand the file to a fresh gwrite instance.
        if filename:
            os.spawnvp(os.P_NOWAIT, sys.argv[0], ['gwrite', filename])
            pass
        else:
            os.spawnvp(os.P_NOWAIT, sys.argv[0], ['gwrite'])
            pass
        pass
    pass
def on_select_recent(self, menu):
    """Open the file picked from the recent-files menu."""
    chosen = menu.get_current_item().get_uri_display()
    self.open(chosen)
def on_open(self, *args):
    """Show the Open dialog and open the chosen file if it is readable."""
    filename = gtkdialogs.open(
        title=_('Open'),
        name_mimes=[
            [_("Html Document"), "text/html"],
            [_("MS Doc Document"), "application/msword"],
        ])
    if filename and os.access(filename, os.R_OK):
        self.open(filename)
    gtk.gdk.threads_leave()
def on_save(self, *args):
    """Save the current document, asking for a filename when it is new.

    Returns the filename on success, False when the write fails, or the
    (falsy) dialog result when the user cancels.
    """
    html = self.edit.get_html()
    if self.edit.editfile:
        filename = self.edit.editfile
    else:
        # New document: propose a name derived from the document title.
        current_name = get_doctitle(html)
        filename = gtkdialogs.save(title=_('Save'),
                                   name_mimes=[[_("Html Document"), "text/html"]],
                                   current_name=current_name,)
        if filename and not '.' in os.path.basename(filename):
            filename = filename + '.html'
    if filename:
        try:
            # Explicitly close the handle: the original
            # `file(filename, 'w').write(html)` relied on GC to flush/close.
            f = open(filename, 'w')
            try:
                f.write(html)
            finally:
                f.close()
        except EnvironmentError:
            # Only I/O errors mean "could not write"; a bare except here
            # used to swallow everything.
            gtkdialogs.warning(_("Unable to write to file."))
            return False
        self.edit.lastDir = os.path.dirname(filename)
        if not self.edit.editfile: self.add_recent(filename)  # add to recent files
        self.editfile = filename
        self.edit.set_saved()
        self.window.set_title(os.path.basename(filename) + ' - ' + Title)
        ## Update the tab label
        self.edit.editfile = filename
        self.edit.title = os.path.basename(filename)
        self.notebox_set_label_text(self.editbox, self.edit.title)
    gtk.gdk.threads_leave()
    return filename
def on_save_as(self, *args):
    """Save the current document under a new name chosen in a dialog.

    Returns False when the write fails; the in-memory document keeps its
    original filename (this is a copy-out, not a rename).
    """
    html = self.edit.get_html()
    # Propose a name derived from the document title.
    current_name = get_doctitle(html)
    filename = gtkdialogs.save(title=_('Save As'),
                               name_mimes=[[_("Html Document"), "text/html"]],
                               current_name=current_name, folder=self.edit.lastDir,)
    if filename and not '.' in os.path.basename(filename):
        filename = filename + '.html'
    if filename:
        try:
            # Explicitly close the handle (the original leaked it via
            # `file(filename, 'w').write(html)`).
            f = open(filename, 'w')
            try:
                f.write(html)
            finally:
                f.close()
        except EnvironmentError:
            gtkdialogs.warning(_("Unable to write to file."))
            return False
        self.add_recent(filename)  # add to recent files
        self.edit.lastDir = os.path.dirname(filename)
    gtk.gdk.threads_leave()
def _text_stats(self, text):
    """Return counting statistics for *text* as a 7-tuple.

    (words, chars_with_spaces, chars_no_spaces, paragraphs, lines,
    english_words, chinese_chars).  "Words" counts CJK characters
    individually plus whole \\w+ runs.
    """
    chinese = len(re.findall(u'[\u4e00-\uffff]', text))
    english = len(re.findall(u'\\w+', text))
    all_lines = text.splitlines()
    return (chinese + english,
            len(text),
            len(''.join(text.split())),
            len([line for line in all_lines if line]),  # non-empty lines = paragraphs
            len(all_lines),
            english,
            chinese)

def on_word_counts(self, *args):
    """Show a table of word/character/paragraph/line counts for the whole
    document and, when text is selected, for the selection as well.

    The duplicated counting code for document vs. selection is factored
    into _text_stats().
    """
    document = self.edit.get_text().decode('utf8')
    selection = self.edit.get_selection()
    (words, chars_ws, chars_ns, paragraphs, lines,
     words_en, words_cn) = self._text_stats(document)
    (s_words, s_chars_ws, s_chars_ns, s_paragraphs, s_lines,
     s_words_en, s_words_cn) = self._text_stats(selection)
    # Rows: label, document value, selection value (blank when nothing
    # is selected — `selection and x` yields the falsy selection then).
    info = (
        ("", _("Document"), selection and _("Selection")),
        (_("Words: "), words, selection and s_words, ),
        (_("Characters (with spaces): "), chars_ws, selection and s_chars_ws),
        (_("Characters (no spaces): "), chars_ns, selection and s_chars_ns),
        (_("Paragraphs: "), paragraphs, selection and s_paragraphs),
        (_("Lines: "), lines, selection and s_lines),
        (_("English words: "), words_en, selection and s_words_en),
        (_("Chinese characters: "), words_cn, selection and s_words_cn),
    )
    gtkdialogs.infotablebox(_("Word Counts"), "<b>%s</b>" % self.edit.title, info)
    return
def on_print(self, *args):
    """Open the print dialog for the current document."""
    self.edit.do_print()
def do_undo(self, *args):
    """Raise the window and undo the last edit."""
    self.window.present()
    self.edit.do_undo()
def do_redo(self, *args):
    """Raise the window and redo the last undone edit."""
    self.window.present()
    self.edit.do_redo()
def do_cut(self, *args):
    """Raise the window and cut the selection to the clipboard."""
    self.window.present()
    self.edit.do_cut()
def do_copy(self, *args):
    """Raise the window and copy the selection to the clipboard."""
    self.window.present()
    self.edit.do_copy()
def do_paste(self, *args):
    """Raise the window and paste the clipboard contents."""
    self.window.present()
    self.edit.do_paste()
def do_paste_unformatted(self, *args):
    """Paste the clipboard contents as plain text."""
    self.edit.do_paste_unformatted()
def do_delete(self, *args):
    """Raise the window and delete the selection."""
    self.window.present()
    self.edit.do_delete()
def do_selectall(self, *args):
    """Raise the window and select the whole document."""
    self.window.present()
    self.edit.do_selectall()
def show_findbar(self, *args):
    """Reveal the find bar, focus the search entry and jump to the first match."""
    self.findbar.show_all()
    self.entry_searchtext.grab_focus()
    self.do_find_text(self.entry_searchtext)
def view_update_contents(self, *args):
    """Rebuild the navigation pane's table of contents from the document."""
    self.window.present()
    self.linkview.updatehtmllinks(self.edit.do_view_update_contents())
def view_toggle_autonumber(self, *args):
    """Toggle heading auto-numbering and refresh the contents pane."""
    self.window.present()
    self.linkview.updatehtmllinks(self.edit.do_view_toggle_autonumber())
def view_sourceview(self, *args):
    """Toggle between WYSIWYG and HTML-source view.

    The navigation pane is hidden while in source view; the split between
    immediate calls and gobject.idle_add keeps the transition flicker-free.
    """
    self.window.present()
    ## Source mode hides the navigation pane
    #@NOTE the ordering plus idle_add below is deliberate, to avoid flicker
    if not self.edit.get_view_source_mode():
        ## Switch to source view first, then hide the navigation bar from the
        ## idle queue, so the visual change stays smooth.
        self.edit.toggle_html_view()
        gobject.idle_add( self.editbox.navigation_pane.hide )
        pass
    else:
        ## Show the navigation bar first, then switch back to WYSIWYG from the
        ## idle queue, so the visual change stays smooth.
        self.editbox.navigation_pane.show_all()
        gobject.idle_add( self.edit.toggle_html_view )
        pass
    #self.edit.do_bodyhtml_view()
    pass
def do_update_images(self, *args):
    """Raise the window and inline document images as base64 data."""
    self.window.present()
    self.edit.do_image_base64()
def do_insertimage(self, *args):
    """Ask for an image file and insert it at the caret."""
    src = gtkdialogs.open(title=_('InsertImage'), name_mimes=[[_("Image Files"), "image/*"]])
    if src:
        self.edit.do_insertimage(src)
def do_createlink(self, *args):
    """Prompt for a URL and turn the selection into a hyperlink.

    An empty answer or the untouched "http://" placeholder is ignored.
    """
    link = gtkdialogs.inputbox(title=_('Create Link'), label=_('URL:'), text="")
    if link and link != "http://":
        self.edit.do_createlink(link)
def do_inserthorizontalrule(self, *args):
    """Raise the window and insert a horizontal rule at the caret."""
    self.window.present()
    self.edit.do_inserthorizontalrule()
def do_insert_table(self, *args):
    """Ask for table dimensions (default 3x3) and insert the table.

    NOTE(review): the original spinner label read "Cows:" — assumed to be
    a typo for columns and fixed; the argument order passed on to
    edit.do_insert_table() is unchanged.
    """
    cols, rows = gtkdialogs.spinbox2(title=_('Insert Table'),
                                     label1=_('Rows:'), value1=3,
                                     label2=_('Columns:'), value2=3)
    self.edit.do_insert_table(cols, rows)
def do_insert_html(self, *args):
    """Prompt for raw HTML and insert it at the caret."""
    markup = gtkdialogs.textbox(title=_('Insert Html'), text='')
    if markup:
        self.edit.do_insert_html(markup)
def do_insert_latex_math_equation(self, *args):
    """Prompt for a LaTeX expression, render it and insert the result."""
    latex = gtklatex.latex_dlg()
    if latex:
        self.edit.do_insert_html(gtklatex.tex2html(latex))
def do_insert_contents(self, *args):
    """Raise the window and insert a table of contents into the document."""
    self.window.present()
    self.edit.do_insert_contents()
def do_formatblock_p(self, *args):
    """Format the current block as a paragraph and refresh the contents pane."""
    self.window.present()
    self.linkview.updatehtmllinks(self.edit.do_formatblock_p())
def do_formatblock_h1(self, *args):
    """Format the current block as an H1 heading and refresh the contents pane."""
    self.window.present()
    self.linkview.updatehtmllinks(self.edit.do_formatblock_h1())
def do_formatblock_h2(self, *args):
    """Format the current block as an H2 heading and refresh the contents pane."""
    self.window.present()
    self.linkview.updatehtmllinks(self.edit.do_formatblock_h2())
def do_formatblock_h3(self, *args):
    """Format the current block as an H3 heading and refresh the contents pane."""
    self.window.present()
    self.linkview.updatehtmllinks(self.edit.do_formatblock_h3())
def do_formatblock_h4(self, *args):
    """Format the current block as an H4 heading and refresh the contents pane."""
    self.window.present()
    self.linkview.updatehtmllinks(self.edit.do_formatblock_h4())
def do_formatblock_h5(self, *args):
    """Format the current block as an H5 heading and refresh the contents pane."""
    self.window.present()
    self.linkview.updatehtmllinks(self.edit.do_formatblock_h5())
def do_formatblock_h6(self, *args):
    """Format the current block as an H6 heading and refresh the contents pane."""
    self.window.present()
    self.linkview.updatehtmllinks(self.edit.do_formatblock_h6())
def do_insertunorderedlist(self, *args):
    """Raise the window and toggle a bulleted list at the caret."""
    self.window.present()
    self.edit.do_insertunorderedlist()
def do_insertorderedlist(self, *args):
    """Raise the window and toggle a numbered list at the caret."""
    self.window.present()
    self.edit.do_insertorderedlist()
def do_formatblock_div(self, *args):
    """Format the current block as a <div>."""
    self.window.present()
    self.edit.do_formatblock_div()
def do_formatblock_address(self, *args):
    """Format the current block as an <address>."""
    self.window.present()
    self.edit.do_formatblock_address()
def do_formatblock_code(self, *args):
    """Format the current block as <code>."""
    self.window.present()
    self.edit.do_formatblock_code()
def do_formatblock_blockquote(self, *args):
    """Format the current block as a <blockquote>."""
    self.window.present()
    self.edit.do_formatblock_blockquote()
def do_formatblock_pre(self, *args):
    """Format the current block as preformatted text (<pre>)."""
    self.window.present()
    self.edit.do_formatblock_pre()
def on_bold(self, *args):
    """Raise the window and toggle bold on the selection."""
    self.window.present()
    self.edit.do_bold()
def do_underline(self, *args):
    """Raise the window and toggle underline on the selection."""
    self.window.present()
    self.edit.do_underline()
def do_italic(self, *args):
    """Raise the window and toggle italic on the selection."""
    self.window.present()
    self.edit.do_italic()
def do_strikethrough(self, *args):
    """Raise the window and toggle strikethrough on the selection."""
    self.window.present()
    self.edit.do_strikethrough()
def do_font_fontname(self, widget, fontname):
    """Raise the window and apply *fontname* to the selection."""
    self.window.present()
    self.edit.do_font_fontname(fontname)
def do_fontsize_1(self, *args):
    """Raise the window and apply font size 1 to the selection.

    Fixed: the original called self.edit.do_fontsize_11() — a typo for
    do_fontsize_1() (all siblings call do_fontsize_<N>()).
    """
    self.window.present()
    self.edit.do_fontsize_1()
def do_fontsize_2(self, *args):
    """Raise the window and apply font size 2 to the selection."""
    self.window.present()
    self.edit.do_fontsize_2()
def do_fontsize_3(self, *args):
    """Raise the window and apply font size 3 to the selection."""
    self.window.present()
    self.edit.do_fontsize_3()
def do_fontsize_4(self, *args):
    """Raise the window and apply font size 4 to the selection."""
    self.window.present()
    self.edit.do_fontsize_4()
def do_fontsize_5(self, *args):
    """Raise the window and apply font size 5 to the selection."""
    self.window.present()
    self.edit.do_fontsize_5()
def do_fontsize_6(self, *args):
    """Raise the window and apply font size 6 to the selection."""
    self.window.present()
    self.edit.do_fontsize_6()
def do_fontsize_7(self, *args):
    """Raise the window and apply font size 7 to the selection."""
    self.window.present()
    self.edit.do_fontsize_7()
def do_color_forecolor(self, *args):
    """Apply the remembered text color, or ask for one on first use.

    The instance __dict__ check deliberately ignores class attributes:
    the color is only "remembered" once set on this instance.
    """
    if "forecolor" not in self.__dict__:
        self.on_color_select_forecolor()
        return
    self.edit.grab_focus()
    self.edit.do_color_forecolor(self.forecolor)
def on_color_select_forecolor(self, *args):
    """Pop up the color chooser, remember and apply the chosen text color."""
    color = gtkdialogs.colorbox()
    if color:
        self.forecolor = color
        self.edit.do_color_forecolor(color)
def do_color_hilitecolor(self, *args):
    """Apply the remembered highlight color, or ask for one on first use.

    The instance __dict__ check deliberately ignores class attributes:
    the color is only "remembered" once set on this instance.
    """
    if "hilitecolor" not in self.__dict__:
        self.on_color_select_hilitecolor()
        return
    self.edit.grab_focus()
    self.edit.do_color_hilitecolor(self.hilitecolor)
def on_color_select_hilitecolor(self, *args):
    """Pop up the color chooser, remember and apply the chosen highlight color.

    A reentrancy flag suppresses the duplicate activation that
    gtk.MenuToolButton's arrow emits for a single click.  The flag is now
    cleared in a finally block — previously an exception from the dialog
    would leave it set and lock this handler out permanently.
    """
    if self.__dict__.get('_on_color_select_hilitecolor'):
        return True
    self._on_color_select_hilitecolor = 1
    try:
        color = gtkdialogs.colorbox()
    finally:
        self._on_color_select_hilitecolor = 0
    if color:
        self.hilitecolor = color
        self.edit.do_color_hilitecolor(color)
    return False
def do_removeformat(self, *args):
    """Raise the window and strip formatting from the selection."""
    self.window.present()
    self.edit.do_removeformat()
def do_justifyleft(self, *args):
    """Raise the window and left-align the current block."""
    self.window.present()
    self.edit.do_justifyleft()
def do_justifycenter(self, *args):
    """Raise the window and center the current block."""
    self.window.present()
    self.edit.do_justifycenter()
def do_justifyfull(self, *args):
    """Raise the window and fully justify the current block."""
    self.window.present()
    self.edit.do_justifyfull()
def do_justifyright(self, *args):
    """Raise the window and right-align the current block."""
    self.window.present()
    self.edit.do_justifyright()
def do_indent(self, *args):
    """Raise the window and increase the block indent."""
    self.window.present()
    self.edit.do_indent()
def do_outdent(self, *args):
    """Decrease the block indent."""
    self.edit.do_outdent()
def do_subscript(self, *args):
    """Raise the window and toggle subscript on the selection."""
    self.window.present()
    self.edit.do_subscript()
def do_superscript(self, *args):
    """Raise the window and toggle superscript on the selection."""
    self.window.present()
    self.edit.do_superscript()
def on_about(self, *args):
    """Show the modal About dialog."""
    authors = [
        "Jiahua Huang <jhuangjiahua(at)gmail.com>",
        "Aron Xu <happyaron.xu(at)gmail.com>",
    ]
    dialog = gobject.new(gtk.AboutDialog,
                         name=_("GWrite"),
                         program_name=_("GWrite"),
                         logo_icon_name="gwrite",
                         version=__version__,
                         copyright=_("Copyright (C) 2009-2010 Jiahua Huang, Aron Xu"),
                         comments=_("Simple GTK+ HTML5 Rich Text Editor"),
                         license="LGPLv3+",
                         website="http://gwrite.googlecode.com/",
                         website_label="gwrite.googlecode.com",
                         authors=authors)
    dialog.run()
    dialog.destroy()
def hide_findbar(self, *args):
    """Hide the find/replace bar."""
    self.findbar.hide()
    def do_highlight_text_matches(self, *args):
        '''Highlight every occurrence of the search-entry text and show
        the match count in the entry tooltip; clear all highlights when
        the entry is empty.'''
        text = self.entry_searchtext.get_text()
        if text:
            self.edit.unmark_text_matches()
            matches = self.edit.mark_text_matches(text, 0, 0)
            self.edit.set_highlight_text_matches(1)
            self.entry_searchtext.set_tooltip_markup(_("%s matches") % matches)
            pass
        else:
            self.edit.unmark_text_matches()
            self.edit.set_highlight_text_matches(0)
            self.entry_searchtext.set_tooltip_text(_("Search text"))
            pass
        pass
def do_find_text_backward(self, *args):
#-print 'do_find_text_backward:'
text = self.entry_searchtext.get_text()
if not text: return
self.edit.do_find_text_backward(text)
pass
    def do_find_text(self, *args):
        '''Search forward for the text in the search entry.'''
        #-print 'do_find_text:'
        # (disabled) clicking the leading icon of the entry used to mean
        # "search backwards":
        #if self.entry_searchtext.get_pointer()[0] < 30:
        #    return self.do_find_text_backward()
        text = self.entry_searchtext.get_text()
        if text:
            self.edit.do_find_text(text)
        pass
def do_replace_text(self, *args):
#-print 'do_replace_text:'
ffindtext = self.entry_searchtext.get_text()
replacetext = self.entry_replace_text.get_text()
if ffindtext:
self.edit.do_replace_text(ffindtext, replacetext)
pass
def do_replace_text_all(self, *args):
#-print 'do_replace_text_all:'
ffindtext = self.entry_searchtext.get_text()
replacetext = self.entry_replace_text.get_text()
if ffindtext:
self.edit.do_replace_text_all(ffindtext, replacetext)
pass
def get_custom_widget(self, id, string1, string2, int1, int2):
w = gtk.Label(_("(custom widget: %s)") % id)
return w
##cmd test
usage = _('''GWrite
Usage:
gwrite [OPTION...] [FILE...] - Edit html files
Options:
-h, --help Show help options
-v, --version Show version information
''')
def openedit(filename=""):
    '''Open *filename* in the existing main window.

    Thin wrapper around MainWindow.open() suitable as a gobject
    idle/timeout callback: it returns False so gtk.idle_add /
    gtk.timeout_add will not run it again.
    '''
    # NOTE(review): Windows appears to be a module-level list of open
    # MainWindow instances -- confirm against the surrounding file.
    Windows[0].open(filename)
    return False
def _listen(s):
    '''Accept connections on the single-instance control unix socket.

    Each peer sends a newline-separated list of file paths; every path
    is handed to openedit() on the GTK main loop via gobject.idle_add.
    Runs forever in a background thread.
    '''
    #-print 'listen:', s
    while 1:
        conn, addr = s.accept()
        try:
            rev = conn.recv(102400)
        finally:
            # the previous version never closed the accepted socket,
            # leaking one file descriptor per client connection
            conn.close()
        for i in rev.split('\n'):
            #-print 'Open:', i
            gobject.idle_add(openedit, i)
def main():
    '''Command-line entry point.

    Parses options, enforces single-instance mode when configured (by
    handing the file list to a running instance over a unix socket, or
    becoming the listener), opens the requested files and runs the GTK
    main loop.
    '''
    import os, sys
    import socket
    ## parse command-line arguments
    import getopt
    config.load()
    gtk.gdk.threads_init()
    try:
        opts, args = getopt.getopt(sys.argv[1:], 'vh', ['version', 'help'])
        pass
    except:
        print usage
        return
    for o, v in opts:
        if o in ('-h', '--help'):
            print usage
            return
        elif o in ('-v', '--version'):
            print __version__
            return
        pass
    ## files to open
    editfiles = [ os.path.abspath(i) for i in args ]
    ## single-instance mode
    if config.single_instance_mode:
        ## profile dir and control-socket path
        profdir = config.profdir
        ## try to hand the files to an already-running GWrite
        ctlfile = config.ctlfile
        try:
            ## an instance is already listening: send the file list
            s = socket.socket(socket.AF_UNIX)
            s.connect(ctlfile)
            s.send('\n'.join(editfiles))
            #-print 'sent:', editfiles
            return
        except:
            #raise
            #-print 'new:'
            pass
        ## no instance found: become the listener ourselves
        s = socket.socket(socket.AF_UNIX)
        if os.access(ctlfile, os.R_OK): os.remove(ctlfile)
        s.bind(ctlfile)
        s.listen(1)
        thread.start_new_thread(_listen, (s,))
        pass
    ## open the files
    edit = MainWindow( editfiles[0:] and editfiles[0] or '' )
    for i in editfiles[1:]:
        i = os.path.abspath(i)
        edit.open(i)
        pass
    ## fall back to the Tango icon theme when only hicolor is set
    settings = gtk.settings_get_default( )
    if settings.get_property( 'gtk-icon-theme-name' ) == 'hicolor':
        settings.set_property( 'gtk-icon-theme-name', 'Tango')
        pass
    ## extra icon search path shipped next to the module
    icon_theme = gtk.icon_theme_get_default()
    icon_dir = os.path.dirname(__file__) + '/icons'
    icon_theme.append_search_path(icon_dir)
    ##
    gtk.gdk.threads_enter()
    gtk.main()
    gtk.gdk.threads_leave()
if __name__ == '__main__':
main()
| Python |
#!/usr/bin/python
# -*- coding: UTF-8 -*-
# vim:set shiftwidth=4 tabstop=4 expandtab textwidth=79:
# Author: Huang Jiahua <jhuangjiahua@gmail.com>
# License: LGPLv3+
# Last modified:
app = 'gwrite'
import os, sys
import gettext
# Prefer the in-tree message catalog when running from a source checkout
# (../build/mo relative to the launcher script); otherwise fall back to
# the system locale directory.  unicode=True makes _() return unicode
# strings (Python 2 gettext.install).
if os.path.isdir(os.path.dirname(sys.argv[0]) + '/../build/mo'):
    gettext.install(app, os.path.dirname(sys.argv[0]) + '/../build/mo', unicode=True)
else:
    gettext.install(app, unicode=True)
if __name__=="__main__":
    print _('')
| Python |
#!/usr/bin/python
# -*- coding: UTF-8 -*-
'''config
@author: U{Jiahua Huang <jhuangjiahua@gmail.com>}
@license: LGPLv3+
'''
import gtk, gobject
import os, sys
try: import cPickle as pickle
except: import pickle
try: import i18n
except: from gettext import gettext as _
single_instance_mode = 0
mdi_mode = 1
def getconf():
    '''Build the configuration dict.

    Collects every plain-data module-level variable (str/int/long/float/
    dict/list/bool) plus derived per-user paths.  Creates the profile
    directory on first use.  Assumes HOME and DISPLAY are set in the
    environment.
    '''
    config = {}
    ##
    profdir = os.environ['HOME'] + '/.config/GWrite'
    if not os.path.isdir(profdir): os.makedirs(profdir)
    # per-display control socket so each X display gets its own instance
    ctlfile = profdir + '/gwrite.ctl' + os.environ['DISPLAY']
    prof = profdir + '/gwrite.conf'
    user_stylesheet_file = profdir + '/user_stylesheet_uri.css'
    ##
    # snapshot the module's simple settings (skip dunders and anything
    # that is not plain data, e.g. imported modules and functions)
    for k, v in globals().items():
        if not k.startswith('__') and (
            isinstance(v, str)
            or isinstance(v, int)
            or isinstance(v, long)
            or isinstance(v, float)
            or isinstance(v, dict)
            or isinstance(v, list)
            or isinstance(v, bool)
            ):
            config[k] = v
        pass
    config['profdir'] = profdir
    config['ctlfile'] = ctlfile
    config['prof'] = prof
    config['user_stylesheet_file'] = user_stylesheet_file
    return config
def load():
    '''Load the saved configuration.

    Merges the pickled on-disk profile (best-effort: a missing or
    corrupt file is silently ignored) into the defaults from getconf(),
    publishes the result into the module globals and returns it.
    '''
    config = getconf()
    ## merge the pickled profile; the old code used the deprecated
    ## file() builtin and never closed the handle
    try:
        with open(config['prof'], 'rb') as f:
            config.update(pickle.loads(f.read()))
    except Exception:
        pass
    ##
    globals().update(config)
    return config
def write():
    '''Persist the current configuration to the profile file.

    Writes the pickled config dict and returns it.  Uses a context
    manager so the file is flushed and closed deterministically (the
    old file(...).write(...) left the handle to be closed by GC, which
    risks losing the data on abnormal exit).
    '''
    config = getconf()
    with open(config['prof'], 'wb') as f:
        f.write(pickle.dumps(config))
    return config
def show_preference_dlg(title=_("Preferences"), parent=None, *args):
    '''Show the modal Preferences dialog.

    On OK the chosen options are written into the module globals and
    the merged config dict is returned; on Cancel an empty dict is
    returned and nothing is changed.
    '''
    dlg = gtk.Dialog(title, parent, gtk.DIALOG_DESTROY_WITH_PARENT,
                     (gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
                      gtk.STOCK_OK, gtk.RESPONSE_OK ))
    dlg.set_default_size(200, 300)
    ##
    config = getconf()
    ##
    notebook1 = gtk.Notebook()
    notebook1.set_tab_pos(gtk.POS_TOP)
    notebook1.set_scrollable(False)
    notebook1.show()
    vbox1 = gtk.VBox(False, 0)
    vbox1.show()
    vbox1.set_spacing(0)
    checkbutton_mdi_mode = gtk.CheckButton()
    checkbutton_mdi_mode.set_active(False)
    checkbutton_mdi_mode.set_label(_("Use Tabs MDI interface"))
    checkbutton_mdi_mode.set_tooltip_text(_("Supports editing multiple files in one window (known sometimes as tabs or MDI)"))
    checkbutton_mdi_mode.show()
    checkbutton_mdi_mode.set_border_width(10)
    checkbutton_mdi_mode.set_relief(gtk.RELIEF_NORMAL)
    vbox1.pack_start(checkbutton_mdi_mode, False, False, 0)
    checkbutton_single_instance_mode = gtk.CheckButton()
    checkbutton_single_instance_mode.set_active(False)
    checkbutton_single_instance_mode.set_label(_("Single Instance mode"))
    checkbutton_single_instance_mode.set_tooltip_text(_("Only one instance of the application will be running at a time."))
    checkbutton_single_instance_mode.show()
    checkbutton_single_instance_mode.set_border_width(10)
    checkbutton_single_instance_mode.set_relief(gtk.RELIEF_NORMAL)
    vbox1.pack_start(checkbutton_single_instance_mode, False, False, 0)
    hseparator1 = gtk.HSeparator()
    hseparator1.show()
    vbox1.pack_start(hseparator1, False, False, 0)
    label2 = gtk.Label(_("You need to restart gwrite for some options to take effect."))
    label2.set_alignment(0, 0)
    label2.set_angle(0)
    label2.set_padding(20, 20)
    label2.set_line_wrap(True)
    label2.set_width_chars(30)
    label2.show()
    vbox1.pack_start(label2)
    label1 = gtk.Label(_("Run mode"))
    label1.set_angle(0)
    label1.set_padding(0, 0)
    label1.set_line_wrap(False)
    label1.show()
    notebook1.append_page(vbox1, label1)
    ## reflect the currently saved settings
    checkbutton_mdi_mode.set_active(config.get("mdi_mode", 0))
    checkbutton_single_instance_mode.set_active(config.get("single_instance_mode", 0))
    ##
    dlg.vbox.pack_start(notebook1, True, True, 0)
    resp = dlg.run()
    ## read the widgets back before destroying the dialog
    config['mdi_mode'] = checkbutton_mdi_mode.get_active()
    config['single_instance_mode'] = checkbutton_single_instance_mode.get_active()
    ##
    dlg.destroy()
    if resp == gtk.RESPONSE_CANCEL:
        return {}
    globals().update(config)
    return config
if __name__=="__main__":
load()
print show_preference_dlg()
write()
| Python |
#!/usr/bin/python
# -*- coding: UTF-8 -*-
# vim:set shiftwidth=4 tabstop=4 expandtab textwidth=79:
# Author: Huang Jiahua <jhuangjiahua@gmail.com>
# License: LGPLv3+
# Last modified:
__version__ = '0.5.1'
import gtk, gobject
import webkit
import jswebkit
import gtklatex
import urllib2
import os, errno
import re
import docfilter
try: import i18n
except: from gettext import gettext as _
def format_html(html):
    '''Insert line breaks around block-level HTML tags so the saved
    source is readable.

    Adjacent <pre> blocks are merged and any <br> inside a <pre> is
    turned into a real newline.
    '''
    block_tags = ('address|blockquote|center|dir|div|dl|fieldset|form|'
                  'h1|h2|h3|h4|h5|h6|hr|isindex|menu|noframes|noscript|'
                  'ol|p|pre|table|ul|dd|dt|frameset|li|tbody|td|tfoot|'
                  'th|thead|tr')
    # newline before every opening block tag
    html = re.sub('\n?\ ?<(%s)([^>]*?)>' % block_tags, '\n<\\1\\2>', html)
    # newline after every closing block tag
    html = re.sub('</(%s)([^>]*?)>\ ?\n?' % block_tags, '</\\1\\2>\n', html)
    # empty elements get a line of their own
    html = re.sub('\n?<(img|hr|br)([^>]*?)>\n?', '\n<\\1\\2>\n', html)
    # merge adjacent <pre> blocks, then turn <br> inside <pre> into "\n"
    html = re.sub('\n?</pre>\s*<pre>\n?', '', html)
    html = re.sub('<pre>[^\0]*?</pre>',
                  lambda m: re.sub('\n?<br>\n?', '\\n', m.group()), html)
    return html
def stastr(stri):
    '''Escape *stri* for safe embedding inside a quoted JavaScript
    string literal: backslashes are doubled, both quote characters are
    backslash-escaped, and newlines become the two characters \\n.
    '''
    # order matters: backslashes must be doubled before any escape
    # sequences are introduced
    for raw, escaped in (('\\', '\\\\'), ('"', '\\"'), ("'", "\\'"), ('\n', '\\n')):
        stri = stri.replace(raw, escaped)
    return stri
def get_end_ids(start):
    '''Return the candidate ids of the sections that could follow *start*.

    Heading ids look like ``g5.1.1.1`` (the ``g`` prefix keeps them
    valid W3C ids).  Candidates are produced by incrementing each
    trailing component in turn, innermost first.

    >>> get_end_ids('g5.1.1.1')
    ['g5.1.1.2', 'g5.1.2', 'g5.2', 'g6']
    '''
    # Strip only the leading "g" marker.  The previous
    # start.replace('g', '') removed EVERY "g" in the id, which would
    # corrupt any id containing a "g" elsewhere.
    if start.startswith('g'):
        start = start[1:]
    parts = start.split('.')
    ends = []
    for i in range(1, len(parts) + 1):
        bumped = int(parts[-i]) + 1
        ends.append('g' + '.'.join(parts[:-i] + [str(bumped)]))
    return ends
def textbox(title='Text Box', label='Text',
            parent=None, text=''):
    """Show a modal multi-line text-edit dialog.

    Returns the edited text when OK is pressed, otherwise None.  The
    *label* parameter is currently unused (the label widget code is
    commented out below).
    """
    dlg = gtk.Dialog(title, parent, gtk.DIALOG_DESTROY_WITH_PARENT,
                     (gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
                      gtk.STOCK_OK, gtk.RESPONSE_OK ))
    dlg.set_default_size(500,500)
    #lbl = gtk.Label(label)
    #lbl.set_alignment(0, 0.5)
    #lbl.show()
    #dlg.vbox.pack_start(lbl, False)
    gscw = gtk.ScrolledWindow()
    gscw.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
    textview=gtk.TextView()
    textview.set_wrap_mode(gtk.WRAP_WORD_CHAR)
    buffer = textview.get_buffer()
    if text: buffer.set_text(text)
    #textview.show()
    gscw.add(textview)
    #gscw.show()
    dlg.vbox.pack_start(gscw)
    dlg.show_all()
    resp = dlg.run()
    # read the buffer before the dialog is destroyed
    text=buffer.get_text(buffer.get_start_iter(),buffer.get_end_iter())
    dlg.destroy()
    if resp == gtk.RESPONSE_OK:
        return text
    return None
def menu_find_with_stock(menu, stock):
    '''Return the index of the first item in *menu* whose image uses
    the given stock id, or -1 when no such item exists.
    '''
    for index, item in enumerate(menu.get_children()):
        try:
            # Items without an image, or with a non-stock image, raise
            # (AttributeError/TypeError) here and are simply skipped.
            # Narrowed from a bare except so KeyboardInterrupt and
            # SystemExit are no longer swallowed.
            if item.get_image().get_stock()[0] == stock:
                return index
        except Exception:
            pass
    return -1
BLANKHTML='''<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8" />
<meta http-equiv="X-UA-Compatible" content="IE=edge" />
<meta name="generator" content="GWrite (WYSIWYG editor)" />
<title></title>
<style>
img{
border: 2px;
border-style: solid;
border-color: #c3d9ff;
padding: 5px;
}
h1, h2, h3, h4, h5, h6 {
color: #7DA721;
font-weight: bold;
}
p{
text-indent: 2em;
text-align: justify;
}
blockquote{
background-color:#EEFFFF;
border-left: 5px solid green;
padding-left: 5px;
margin: 0px;
padding: 5px;
}
pre{
background-color:#EEEEFF;
display: block;
border-left: 1px solid green;
margin: 0px;
padding: 5px;
}
code{
background-color:#EEEEFF;
margin: 15px;
padding: 5px;
}
</style>
</head>
<body>
<p><br/></p>
</body>
</html>
'''
class WebKitEdit(webkit.WebView):
'''Html Edit Widget
'''
    def __init__(self, editfile=''):
        '''Create the editable WebView.

        Loads *editfile* when given and readable, otherwise starts from
        the blank document template, then wires up the signal handlers
        and relaxes the WebKit security settings needed for base64
        image inlining.
        '''
        webkit.WebView.__init__(self)
        self.set_property('can-focus', True)
        self.set_property('can-default', True)
        self.set_full_content_zoom(1)
        self.write_html(BLANKHTML)
        self.lastDir = ''
        self.editfile = editfile
        if editfile and os.access(editfile, os.R_OK):
            self.open(editfile)
            self.lastDir = os.path.dirname(editfile)
            pass
        else:
            pass
        self.set_editable(1)
        #self.do_editable()
        self.connect("load-finished", self.do_editable) # ensures document.body is ready
        self._html = ""
        self.connect("navigation-requested", self.on_navigation_requested)
        self.connect("new-window-policy-decision-requested", self.on_new_window_policy_decision_requested)
        self.connect_after("populate-popup", self.populate_popup)
        self.connect("script-prompt", self.on_script_prompt)
        ## allow cross-origin XMLHttpRequest so images can be inlined as base64
        settings = self.get_settings()
        settings.set_property('enable-xss-auditor', False)
        settings.set_property('enable-universal-access-from-file-uris', True)
        settings.set_property('enable-file-access-from-file-uris', True)
        settings.set_property('enable-page-cache', True)
        settings.set_property('javascript-can-access-clipboard', True)
        settings.set_property('tab-key-cycles-through-elements', False)
        ##
        pass
    def open(self, editfile):
        '''Load *editfile* into the editor, converting non-HTML formats
        first where a filter exists.'''
        self.editfile = editfile
        ## import .doc via the external converter
        if re.match('.*\.doc', editfile, re.I):
            self.editfile = editfile[:-4] + '.html'
            editfile = docfilter.doc2html(editfile)
            pass
        ## .odf import: not implemented
        elif re.match('.*\.odf', editfile, re.I):
            pass
        ## .rtf import: not implemented
        elif re.match('.*\.rtf', editfile, re.I):
            pass
        ## load the (possibly converted) html
        webkit.WebView.open(self, editfile)
        pass
    def ctx(self, *args):
        '''Return a fresh JSContext wrapping the page's global context.'''
        ctx = jswebkit.JSContext(self.get_main_frame().get_global_context())
        return ctx
    def eval(self, js):
        '''Evaluate *js* in the page and return the result.'''
        return self.ctx().EvaluateScript(js)
    def get_html(self, *args):
        '''Return the document as serialized HTML.

        In WYSIWYG mode the title is refreshed and images are inlined
        before serializing; in source-view mode the visible text (which
        IS the source) is returned instead.
        '''
        if not self.get_view_source_mode():
            self.execute_script('guesstitle();')
            self.do_image_base64()
            html = self.ctx().EvaluateScript('document.documentElement.innerHTML')
            html = format_html(html)
            return '<!DOCTYPE html>\n<html>\n%s\n</html>\n' % html
        else:
            text = self.eval('''
                html = document.body.innerHTML;
                html = html.replace(/td><td/g, 'td>\\n<td');
                document.body.innerHTML = html;
                text = document.body.textContent;
                text;''')
            text = re.sub('\\s*</body>', '\n</body>', text)
            text = text.replace(' \n', '\n')
            text = format_html(text)
            return text
    def get_section(self, *args):
        #@TODO: meant for per-section word counts etc. -- not implemented
        js = '''
        var range = document.createRange();
        range.setStart(startNode, startOffset);
        range.setEnd(endNode, endOffset);
        '''
        pass
    def get_selection(self, *args):
        '''Return the plain text of the current selection.'''
        text = self.ctx().EvaluateScript('''
        document.getSelection().toString();
        ''')
        return text
    def get_text(self, *args):
        '''Return the document as plain text, with newlines inserted at
        block-level tag boundaries.
        '''
        text = self.ctx().EvaluateScript('''
        //text = document.body.textContent;
        html = document.body.innerHTML;
        html = html.replace(/<h/g, '\\n<h');
        html = html.replace(/<p/g, '\\n<p');
        html = html.replace(/<t/g, '\\n<t');
        html = html.replace(/<br/g, '\\n<br');
        html = html.replace(/<bl/g, '\\n<bl');
        html = html.replace(/<div/g, '\\n<div');
        i = document.createElement("div");
        i.innerHTML = html;
        text = i.textContent;
        text;''')
        return text
    def set_saved(self, *args):
        '''Record the current DOM as the "saved" snapshot.'''
        self._html = self.ctx().EvaluateScript('document.documentElement.innerHTML')
        pass
    def unset_saved(self, *args):
        '''Forget the saved snapshot, marking the document as modified.'''
        self._html = ""
        pass
    def is_saved(self, *args):
        '''True when the DOM still matches the saved snapshot.'''
        return self._html == self.ctx().EvaluateScript('document.documentElement.innerHTML')
    def on_new_window_policy_decision_requested(self, widget,
            WebKitWebFrame, WebKitNetworkRequest,
            WebKitWebNavigationAction, WebKitWebPolicyDecision):
        '''Handle "open in new window" requests (e.g. target=_blank
        links) by delegating to the desktop browser via xdg-open.'''
        uri = WebKitNetworkRequest.get_uri()
        uri = urllib2.unquote(uri)
        os.spawnvp(os.P_NOWAIT, 'xdg-open', ['xdg-open', uri])
        return True
    def on_script_prompt(self, view, WebKitWebFrame, key, value, gpointer):
        '''Handle JS prompt() calls, used as an RPC channel from the page.

        A prompt whose key starts with "_#uptex:" asks to re-edit the
        LaTeX formula of the image whose element id follows the prefix.
        '''
        #-print key, value
        ## update a LaTeX formula image
        if key.startswith('_#uptex:'):
            id = key[8:]
            latex = value[8:].replace('\\\\', '\\')
            latex = gtklatex.latex_dlg(latex)
            if latex:
                img = gtklatex.tex2base64(latex)
                self.execute_script("""
                window.focus();
                img = document.getElementById('%s');
                img.alt = "mimetex:"+"%s";
                img.src='%s';
                """ % (id, stastr(latex), stastr(img)))
                pass
            self.execute_script("""document.getElementById('%s').removeAttribute("id");""" % id)
            return True
        return
    def on_navigation_requested(self, widget, WebKitWebFrame, WebKitNetworkRequest):
        '''Decide how to handle a navigation request.

        Our own file loads proceed, in-document anchors scroll in
        place, and everything else is handed to the desktop browser
        via xdg-open.
        '''
        #-print 'on_navigation_requested:'
        #print WebKitWebFrame, WebKitNetworkRequest
        uri = WebKitNetworkRequest.get_uri()
        uri = urllib2.unquote(uri)
        #-print uri
        # the request caused by self.open()
        if uri == 'file://' + self.editfile:
            return False
        # jump to an in-document anchor
        if uri.startswith('#'):
            self.go_anchor(uri)
            return True
        # external link: open with the desktop browser
        docuri = self.get_main_frame().get_uri()
        if docuri.split('#', 1)[0] != uri.split('#', 1)[0]:
            os.spawnvp(os.P_NOWAIT, 'xdg-open', ['xdg-open', uri])
            return True
        return False
    def populate_popup(self, view, menu):
        '''Extend the editor context menu.

        Adds a "Paste Unformatted" item right after the stock Paste
        item whenever the clipboard (or primary selection) holds text.
        '''
        # paste-unformatted menu item
        text = gtk.Clipboard().wait_for_text() or gtk.Clipboard(selection="PRIMARY").wait_for_text()
        if text:
            menuitem_paste_unformatted = gtk.ImageMenuItem(_("Pa_ste Unformatted"))
            menuitem_paste_unformatted.show()
            #menuitem_paste_unformatted.connect("activate", self.do_paste_unformatted)
            menuitem_paste_unformatted.connect("activate",
                    lambda *i: self.do_insert_text(text) )
            n = menu_find_with_stock(menu, 'gtk-paste')
            if n > -1:
                menu.insert(menuitem_paste_unformatted, n+1)
                pass
            pass
        return False
    def select_section(self, widget, anc=""):
        '''Select, in the editor, the text of the section whose heading
        id is *anc* (e.g. "#g5.1"), up to the next sibling heading
        (candidates computed by get_end_ids).'''
        if not anc: anc = widget
        start_id = anc.replace('#', '')
        end_ids = get_end_ids(start_id)
        #-print 'select_section:', start_id, end_ids
        self.go_anchor(anc)
        self.eval('''
        //document.execCommand("selectall", false, false);
        start = document.getElementById("%s");
        ids = %s;
        for (var i=0; i<ids.length; i++){
          if (end = document.getElementById(ids[i])) break;
        }
        sel = document.getSelection();
        sel.collapse(start, 0);
        if (end){
          sel.extend(end, 0);
        } else {
          end = document.createElement('span');
          document.body.appendChild(end);
          sel.extend(end, 0);
          document.body.removeChild(end);
        }
        window.focus();
        ''' % (start_id, end_ids))
        pass
    def go_anchor(self, widget, anc=""):
        '''Scroll the view to the named anchor (leading "#" optional).'''
        if not anc: anc = widget
        anc = anc.replace('#', '')
        return self.execute_script("window.location.href='#%s';" % anc);
        #self.execute_script("""
        #el = document.getElementById("%s");
        #window.scrollTo(0, el.offsetTop);
        #""" % anc)
    def write_html(self, html):
        '''Replace the document with *html*, loaded under a blank file URI.'''
        #print 'WebKitEdit.write_html:'
        self.load_html_string(html, 'file:///tmp/blank.html')
        pass
    def update_html(self, html):
        '''Reload the document from *html*, keeping the file URI so
        relative resources still resolve.
        '''
        #print 'WebKitEdit.update_html:'
        # updating <head> through the JS DOM fails:
        #   console message: @3: Error: NO_MODIFICATION_ALLOWED_ERR: DOM Exception 7
        # so we simply load_html_string() instead
        uri = (self.editfile.startswith('/') and 'file://' + self.editfile) or 'file:///tmp/blank.html'
        self.load_html_string(html, uri)
        return
        #head = (re.findall(r'''<head>([^\0]*)</head>''', html)+[""])[0]
        #body = re.sub(r'''<head>[^\0]*</head>''', '', html)
        #self.execute_script(r'''
        #    document.body.innerHTML="%s";
        #    document.getElementsByTagName("head")[0].innerHTML="%s";
        #    '''% (stastr(body), stastr(head)))
        #pass
    def update_bodyhtml(self, html):
        '''Replace only the document body with *html* via the DOM.'''
        #print 'WebKitEdit.update_bodyhtml:'
        self.execute_script(r'''
            document.body.innerHTML="%s";
            '''%stastr(html))
        pass
    def do_editable(self, *args):
        '''Make the document editable and install the in-page JS helpers.

        Runs on every "load-finished": defines guesstitle(), the TOC
        helpers (getheads/getdir/updatedir/toggledirnu), getiname(),
        randomChar() and uptex() inside the page, then focuses the
        window.  The helper source is a raw string, so the JS below is
        passed to the page verbatim.
        '''
        #@TODO: move the title-update / TOC-update JS helpers somewhere reusable
        #-print 'WebKitEdit.set_editable:'
        #cmd = r''' document.documentElement.contentEditable="true"; '''
        self.set_editable(1)
        cmd = r'''
        /*document.documentElement.contentEditable="true";*/
        /*document.body.contentEditable="true";*/
        document.execCommand("useCSS",false, true);
        /* 处理标题 */
        function guesstitle(){
          if( (t=document.getElementsByTagName("title")) && (title=t[0].textContent) ){
            return title;
          }else if( (h1=document.getElementsByTagName("h1")) && h1.length>0 ){
            title=h1[0].textContent;
          } else {
            //p = document.createElement('pre');
            //p.innerHTML = document.body.innerHTML.replace(/</g, '\n\n<').replace(/>/g, '>\n\n');
            //title = p.textContent.replace(/^\s+/g, '').split('\n')[0];
            title = document.body.textContent.split('\n')[0];
          }
          if(! document.getElementsByTagName("title") ){
            t = document.createElement('title');
            t.textContent=title;
            document.getElementsByTagName("head")[0].appendChild(t);
          }else{
            document.getElementsByTagName("title")[0].textContent=title;
          }
        };
        /* 目录处理 */
        function getheads(){
          /* 取得所有 heading 标签到 heads */
          tags = document.getElementsByTagName("*");
          heads = new Array();
          for (var i=0; i<tags.length; i++){
            t = tags[i].nodeName;
            if (t == "H1" || t == "H2" || t == "H3" || t == "H4" ||
                t == "H5" || t == "H6"){
              heads.push(tags[i]);
            }
          }
          return heads;
        };
        autonu = 0;
        if( i = document.body.getAttribute("orderedheadings")){
          autonu = i;
        }
        function toggledirnu(){
          if (autonu == 1){
            autonu = 0;
            document.body.setAttribute("orderedheadings", 0);
          }else{
            autonu = 1;
            document.body.setAttribute("orderedheadings", 1);
          }
          return updatedir();
        };
        function getiname(tt, name){
          if (autonu == 1){
            return tt + ' ' + iname;
          }else{
            return iname;
          }
        }
        function getdir(){
          heads = getheads();
          tt = '';
          tdir = '';
          h1 = 0;
          h2 = 0;
          h3 = 0;
          h4 = 0;
          h5 = 0;
          h6 = 0;
          startHeader = 0;
          startHeader = 1;
          for (var i=startHeader ; i<heads.length; i++){
            inode = heads[i];
            iname = inode.textContent.replace(/^\s*[.\d]*\s+/, ''); /*把标题前边的数字识别为序号*/
            iname = iname.replace('\n',' ');
            switch(heads[i].nodeName){
              case "H1":
                tt = '';
                h1 += 1;
                h2 = 0;
                h3 = 0;
                h4 = 0;
                h5 = 0;
                h6 = 0;
                tt += String(h1);
                inode.id = "g" + tt;
                inode.textContent = getiname(tt, name);
                tdir += '';
                tdir += '<a href="#g' + tt + '">' + getiname(tt, name) + '</a>\n';
                break;
              case "H2":
                tt = '';
                h2 += 1;
                h3 = 0;
                h4 = 0;
                h5 = 0;
                h6 = 0;
                tt += String(h1) + '.' + h2;
                inode.id = "g" + tt;
                inode.textContent = getiname(tt, name);
                tdir += '  ';
                tdir += '<a href="#g' + tt + '">' + getiname(tt, name) + '</a>\n';
                break;
              case "H3":
                tt = '';
                h3 += 1;
                h4 = 0;
                h5 = 0;
                h6 = 0;
                tt += String(h1) + '.' + h2 + '.' + h3;
                inode.id = "g" + tt;
                inode.textContent = getiname(tt, name);
                tdir += '    ';
                tdir += '<a href="#g' + tt + '">' + getiname(tt, name) + '</a>\n';
                break;
              case "H4":
                tt = '';
                h4 += 1;
                h5 = 0;
                h6 = 0;
                tt += String(h1) + '.' + h2 + '.' + h3 + '.' +h4;
                inode.id = "g" + tt;
                inode.textContent = getiname(tt, name);
                tdir += '      ';
                tdir += '<a href="#g' + tt + '">' + getiname(tt, name) + '</a>\n';
                break;
              case "H5":
                tt = '';
                h5 += 1;
                h6 = 0;
                tt += String(h1) + '.' + h2 + '.' + h3 + '.' + h4 + '.' + h5;
                inode.id = "g" + tt;
                inode.textContent = getiname(tt, name);
                tdir += '        ';
                tdir += '<a href="#g' + tt + '">' + getiname(tt, name) + '</a>\n';
                break;
              case "H6":
                tt = '';
                h6 += 1;
                tt += String(h1) + '.' + h2 + '.' + h3 + '.' + h4 + '.' + h5 + '.' + h6;
                inode.id = "g" + tt;
                inode.textContent = getiname(tt, name);
                tdir += '          ';
                tdir += '<a href="#g' + tt + '">' + getiname(tt, name) + '</a>\n';
                break;
            }
          }
          pre = document.createElement('pre');
          pre.innerHTML = tdir;
          tdir = pre.innerHTML;
          return tdir;
        }
        function updatedir(){
          if( i = document.body.getAttribute("orderedheadings")){
            autonu = i;
          }
          dirhtml = getdir();
          if (t=document.getElementById("toctitledir")){
            t.innerHTML = dirhtml;
          }
          return dirhtml;
        };
        function randomChar(l) {
          var x="0123456789qwertyuioplkjhgfdsazxcvbnm";
          var tmp="";
          for(var i=0;i< l;i++) {
            tmp += x.charAt(Math.ceil(Math.random()*100000000)%x.length);
          }
          return tmp;
        }
        function uptex(img){
          img.id = 'mimetex_' + randomChar(5);
          prompt("_#uptex:"+img.id, img.alt);
        }
        window.focus();
        ;'''
        self.execute_script(cmd)
        pass
    def do_image_base64(self, *args):
        '''convert images to base64 inline image
        see http://tools.ietf.org/html/rfc2397
        '''
        gtk.gdk.threads_leave() # work around a GTK threading issue
        self.execute_script(r'''
        var keyStr = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/=";
        function encode64(input) {
          var output = "";
          var chr1, chr2, chr3;
          var enc1, enc2, enc3, enc4;
          var i = 0;
          do {
            chr1 = input.charCodeAt(i++) & 0xff;
            chr2 = input.charCodeAt(i++) & 0xff;
            chr3 = input.charCodeAt(i++) & 0xff;
            enc1 = chr1 >> 2;
            enc2 = ((chr1 & 3) << 4) | (chr2 >> 4);
            enc3 = ((chr2 & 15) << 2) | (chr3 >> 6);
            enc4 = chr3 & 63;
            if (isNaN(chr2)) {
              enc3 = enc4 = 64;
            } else if (isNaN(chr3)) {
              enc4 = 64;
            }
            output = output + keyStr.charAt(enc1) + keyStr.charAt(enc2) +
              keyStr.charAt(enc3) + keyStr.charAt(enc4);
          } while (i < input.length);
          return output;
        };
        /*netscape.security.PrivilegeManager.enablePrivilege("UniversalBrowserRead");*/
        for (var i=document.images.length-1; i+1; i--){
          img = document.images[i];
          if(img.src && !img.src.match(/^data:/)){
            mx = new XMLHttpRequest();
            mx.open("GET", img.src, false);
            mx.overrideMimeType('text/plain; charset=x-user-defined');
            mx.send(null);
            if (mx.responseText && (mx.status==200 || mx.status==0) ){
              img.setAttribute('uri', img.src);
              img.src = "data:image;base64," + encode64(mx.responseText);
            };
          }
        };
        ;
        ''')
        pass
def set_visual_view(self, visual=True):
'''切换所见所得模式
'''
if visual:
if self.get_view_source_mode():
self.toggle_html_view()
pass
else:
if not self.get_view_source_mode():
self.toggle_html_view()
pass
pass
pass
__prev_visual_html = ''
__prev_source_html = ''
    def toggle_html_view(self, *args):
        '''Toggle between WYSIWYG mode and HTML source mode.

        Switching modes rewrites the html, so snapshots of both views
        are kept in order to preserve the "saved" state across a
        round trip with no intervening edits.
        '''
        #print 'WebKitEdit.toggle_html_view:'
        ## WYSIWYG -> source view
        if not self.get_view_source_mode():
            html = self.get_html()
            self.__prev_visual_html = html
            is_saved = self.is_saved()
            self.set_view_source_mode(1)
            self.reload()
            self.update_html(html)
            # take the source-side snapshot once the new view settles
            def do_is_saved():
                if is_saved:
                    self.__prev_source_html = self.get_html()
                    pass
                else:
                    self.__prev_source_html = ''
                    pass
            gobject.idle_add(do_is_saved)
            pass
        ## source view -> WYSIWYG
        else:
            html = self.get_html()
            self.set_view_source_mode(0)
            self.reload()
            if self.__prev_source_html == html:
                self.update_html(self.__prev_visual_html)
                gobject.idle_add(self.set_saved)
                pass
            else:
                self.update_html(html)
                pass
            pass
        return
    def do_print(self, *args):
        '''Open the page's print dialog.'''
        #print 'WebKitEdit.do_print:'
        self.execute_script('print(); ')
        pass
    def do_undo(self, *args):
        '''Undo the last edit.'''
        #print 'WebKitEdit.do_undo:'
        self.execute_script(' document.execCommand("undo", false, false); ')
    def do_redo(self, *args):
        '''Redo the last undone edit.'''
        #print 'WebKitEdit.do_redo:'
        self.execute_script(' document.execCommand("redo", false, false); ')
    def do_cut(self, *args):
        '''Cut the selection to the clipboard.'''
        #print 'WebKitEdit.do_cut:'
        self.cut_clipboard()
    def do_copy(self, *args):
        '''Copy the selection to the clipboard.'''
        #print 'WebKitEdit.do_copy:'
        self.copy_clipboard()
    def do_paste(self, *args):
        '''Paste from the clipboard, keeping formatting.'''
        #print 'WebKitEdit.do_paste:'
        #self.execute_script(' document.execCommand("paste", false, false); ')
        self.paste_clipboard()
    def do_paste_unformatted(self, *args):
        '''Paste clipboard (or primary selection) text without formatting.'''
        #-print 'do_paste_unformatted:'
        text = gtk.Clipboard().wait_for_text() or gtk.Clipboard(selection="PRIMARY").wait_for_text()
        if text:
            self.do_insert_text(text)
            return
        return
    def do_delete(self, *args):
        '''Delete the current selection.'''
        #print 'WebKitEdit.do_delete:'
        self.execute_script(' document.execCommand("delete", false, false); ')
    def do_selectall(self, *args):
        '''Select the whole document.'''
        #print 'WebKitEdit.do_selectall:'
        self.execute_script(' document.execCommand("selectall", false, false); ')
################################################
#
    def do_view_update_contents(self, *args):
        '''Rebuild the table of contents from the heading elements
        (delegates to the in-page updatedir() helper).'''
        #print 'WebKitEdit.view_update_contents:'
        return self.eval(r'''
            updatedir();
            ''')
        pass
    def do_view_toggle_autonumber(self, *args):
        '''Toggle automatic numbering of headings.'''
        #print 'WebKitEdit.view_toggle_autonumber:'
        return self.eval(r'''
            toggledirnu();
            ''')
        pass
    def do_view_sourceview(self, *args):
        '''Switch between WYSIWYG and HTML source view.'''
        #print 'WebKitEdit.view_sourceview:'
        self.toggle_html_view()
        pass
    def do_insertimage(self, img=""):
        '''Insert an image element whose src is *img*.'''
        #if img.startswith('/'): img = 'file://' + img
        #print 'WebKitEdit.do_insertimage:', img
        self.execute_script('''
            document.execCommand("insertimage", false, "%s");
            '''%stastr(img))
        pass
    def do_createlink(self, link=""):
        '''Turn the selection into a hyperlink to *link*; when nothing
        is selected, insert the URL itself as the link text.'''
        #print 'WebKitEdit.do_createlink:'
        self.execute_script(r'''
            link = "%s";
            if( !document.execCommand("createlink", false, link) )
            {
              text = link;
              i = document.createElement("div");
              i.textContent = text;
              text = i.innerHTML;
              html = '<a href="' + link + '">' + text + '</a>';
              document.execCommand("inserthtml", false, html);
            }
            '''%stastr(link))
        pass
    def do_inserthorizontalrule(self, *args):
        '''Insert a horizontal rule at the caret.'''
        #print 'WebKitEdit.do_inserthorizontalrule:'
        self.execute_script('''
            document.execCommand("inserthorizontalrule", false, false); ''')
        pass
    def do_insert_table(self, rows, cows):
        '''Insert a rows x cows table with empty cells.

        (The caller asks the user for the row/column counts.)
        '''
        #print 'WebKitEdit.do_insert_table:'
        html = "\n<table cellspacing='0' border='1px' bordercolor='#aaaacc' width='100%' ><tbody>\n"
        for row in range(int(rows)):
            html+= "<tr>\n"
            for cow in range(int(cows)):
                html+= "  <td><br/></td>\n"
            html+= "</tr>\n"
        html+= "</tbody></table>\n<br/>\n"
        self.do_insert_html(html)
        pass
    def do_insert_html(self, html):
        '''Insert *html* at the caret.

        In source-view mode the markup is escaped first so it shows up
        as literal text in the source.
        '''
        #print 'WebKitEdit.do_insert_html:'
        if not self.get_view_source_mode():
            self.execute_script('''
                window.focus();
                document.execCommand("inserthtml", false, "%s");
                '''%stastr(html))
            pass
        else:
            self.execute_script('''
                var text = "%s";
                var i = document.createElement("div");
                i.textContent = text;
                html = i.innerHTML;
                html = html.replace(/\\ \\ /g, '  ');
                document.execCommand("inserthtml", false, html);
                '''%stastr(html))
            pass
        pass
    def do_insert_text(self, text):
        '''Insert plain *text* at the caret, first converting it to
        escaped html with <br /> line breaks.
        '''
        #print 'WebKitEdit.do_insert_text:'
        self.execute_script('''
            var text = "%s";
            var i = document.createElement("div");
            i.textContent = text;
            html = i.innerHTML;
            html = html.replace(/\\ \\ /g, '  ');
            html = html.replace(/\\n/g, '<br />\\n');
            document.execCommand("inserthtml", false, html);
            '''%stastr(text))
        pass
    def do_insert_contents(self, *args):
        '''Insert (or refresh) the floating table-of-contents block.'''
        #print 'WebKitEdit.do_insert_contents:'
        #@FIXME: cannot remove an already-existing TOC block
        self.execute_script(r'''
        if(t=document.getElementById("toctitle")){
          document.removeChild(t);
        }
        html = '<br/><div id="toctitle" contentEditable="false" style="\
        text-indent: 0; background-color:#EEEEFF; display: block; border: 1px solid green; margin: 15px; padding: 5px; white-space: pre;"\
        ><div title="点击固定目录" onclick=\' t = document.getElementById("toctitle"); if(this.alt){ this.alt = 0; document.body.style.cssText=" "; t.style.cssText="\
        text-indent: 0; background-color:#EEEEFF; display: block; border: 1px solid green; margin: 15px; padding: 5px; white-space: pre; "\
        ; }else{ this.alt = 1; document.body.style.cssText="\
        margin:5pt; border:5pt; height:100%; width:70%; overflow-y:auto;"\
        ; t.style.cssText="\
        text-indent: 0; background-color:#EEEEFF; display: block; border-left: 1px solid green; margin: 0px; padding: 5px; white-space: pre; top:0px; right:0; width:25%; height:98%; overflow:auto; position:fixed; "\
        ; } \' class="dirtitle">目录<br/></div><span id="toctitledir"> </span></div><br/>';
        document.execCommand("inserthtml", false, html);
        updatedir();
        '''.replace("目录", _("Table of contents")))
        pass
def do_formatblock_p(self, *args):
'''段落样式
'''
#print 'WebKitEdit.do_formatblock_p:'
return self.eval('''
document.execCommand("formatblock", false, "p");
updatedir();
''')
pass
def do_formatblock_h1(self, *args):
'''<h1> 样式
@NOTE: 将一行字设为标题后,回车出现的是 <div> 而非 <p>。需要让他换行后使用 <p>。考虑在下一行添加一个 <p>
'''
#print 'WebKitEdit.do_formatblock_h1:'
return self.eval('''
document.execCommand("formatblock", false, "h1");
updatedir();
''')
pass
def do_formatblock_h2(self, *args):
'''<h2> 样式
'''
#print 'WebKitEdit.do_formatblock_h2:'
return self.eval('''
document.execCommand("formatblock", false, "h2");
updatedir();
''')
pass
def do_formatblock_h3(self, *args):
'''<h3> 样式
'''
#print 'WebKitEdit.do_formatblock_h3:'
return self.eval('''
document.execCommand("formatblock", false, "h3");
updatedir();
''')
pass
def do_formatblock_h4(self, *args):
'''<h4> 样式
'''
#print 'WebKitEdit.do_formatblock_h4:'
return self.eval('''
document.execCommand("formatblock", false, "h4");
updatedir();
''')
pass
def do_formatblock_h5(self, *args):
'''<h5> 样式
'''
#print 'WebKitEdit.do_formatblock_h5:'
return self.eval('''
document.execCommand("formatblock", false, "h5");
updatedir();
''')
pass
def do_formatblock_h6(self, *args):
'''<h6> 样式
'''
#print 'WebKitEdit.do_formatblock_h6:'
return self.eval('''
document.execCommand("formatblock", false, "h6");
updatedir();
''')
pass
def do_insertunorderedlist(self, *args):
'''圆点列表
'''
#print 'WebKitEdit.do_insertunorderedlist:'
return self.eval('''
document.execCommand("insertunorderedlist", false, null); ''')
pass
def do_insertorderedlist(self, *args):
'''数字列表
'''
#print 'WebKitEdit.do_insertorderedlist:'
return self.eval('''
document.execCommand("insertorderedlist", false, null); ''')
pass
def do_formatblock_div(self, *args):
''' DIV 样式
'''
#print 'WebKitEdit.formatblock_div:'
return self.eval('''
document.execCommand("formatblock", false, "div"); ''')
pass
def do_formatblock_address(self, *args):
'''地址样式
'''
#print 'WebKitEdit.formatblock_addres:'
return self.eval('''
document.execCommand("formatblock", false, "address"); ''')
pass
def do_formatblock_code(self, *args):
    '''Apply the <code> block style.
    '''
    #print 'WebKitEdit.do_formatblock_code:'
    #@FIXME: "formatblock code" has no effect
    return self.eval('''
        document.execCommand("formatblock", false, "code"); ''')
    pass
def do_formatblock_blockquote(self, *args):
    '''Apply the blockquote (quote/indent) block style.

    Returns the eval result, consistent with the other do_formatblock_*
    helpers.
    '''
    #print 'WebKitEdit.do_formatblock_blockquote:'
    # CONSISTENCY FIX: every sibling do_formatblock_* method returns
    # self.eval(...); this one previously discarded the result.
    return self.eval('''
        document.execCommand("formatblock", false, "blockquote"); ''')
    pass
def do_formatblock_pre(self, *args):
    '''Apply the preformatted (<pre>) block style.
    '''
    #print 'WebKitEdit.do_do_formatblock_pre:'
    return self.eval('''
        document.execCommand("formatblock", false, "pre"); ''')
    pass
def do_bold(self, *args):
    '''Toggle bold on the current selection.
    '''
    #print 'WebKitEdit.do_bold:'
    self.execute_script('''
        document.execCommand("bold", false, null); ''')
    pass
def do_underline(self, *args):
    '''Toggle underline on the current selection.
    '''
    #print 'WebKitEdit.do_underline:'
    self.execute_script('''
        document.execCommand("underline", false, null); ''')
    pass
def do_italic(self, *args):
    '''Toggle italics on the current selection.
    '''
    #print 'WebKitEdit.do_italic:'
    self.execute_script('''
        document.execCommand("italic", false, null); ''')
    pass
def do_strikethrough(self, *args):
    '''Toggle strikethrough on the current selection.
    '''
    #print 'WebKitEdit.do_strikethrough:'
    self.execute_script('''
        document.execCommand("strikethrough", false, null); ''')
    pass
def do_font_fontname(self, fontname):
    '''Set the font family name for the current selection.
    '''
    #print 'WebKitEdit.do_font_fontname:'
    # NOTE(review): fontname is interpolated into the script unescaped —
    # a quote character in the name would break the generated JS. Confirm
    # callers only pass plain font names.
    self.execute_script(r'''
        document.execCommand("useCSS", false, true);
        document.execCommand("fontname", false, "%s");
        '''%fontname)
    pass
def do_fontsize(self, fontsize):
    '''Set the font size (execCommand scale 1-7) for the selection.
    '''
    #print 'WebKitEdit.do_fontsize:'
    self.execute_script(r'''
        document.execCommand("fontsize", false, "%s");
        '''%fontsize)
    pass
def do_fontsize_1(self, *args):
    '''Set font size 1.
    '''
    #print 'WebKitEdit.do_fontsize_1:'
    self.do_fontsize(1)
    pass
def do_fontsize_2(self, *args):
    '''Set font size 2.
    '''
    #print 'WebKitEdit.do_fontsize_2:'
    self.do_fontsize(2)
    pass
def do_fontsize_3(self, *args):
    '''Set font size 3.
    '''
    #print 'WebKitEdit.do_fontsize_3:'
    self.do_fontsize(3)
    pass
def do_fontsize_4(self, *args):
    '''Set font size 4.
    '''
    #print 'WebKitEdit.do_fontsize_4:'
    self.do_fontsize(4)
    pass
def do_fontsize_5(self, *args):
    '''Set font size 5.
    '''
    #print 'WebKitEdit.do_fontsize_5:'
    self.do_fontsize(5)
    pass
def do_fontsize_6(self, *args):
    '''Set font size 6.
    '''
    #print 'WebKitEdit.do_fontsize_6:'
    self.do_fontsize(6)
    pass
def do_fontsize_7(self, *args):
    '''Set font size 7.
    '''
    #print 'WebKitEdit.do_fontsize_7:'
    self.do_fontsize(7)
    pass
def do_color_forecolor(self, color):
    '''Set the text (foreground) color of the selection.
    '''
    #print 'WebKitEdit.do_color_forecolor:'
    # useCSS is toggled off/on around the command so the color is applied
    # with a <font> element rather than inline CSS.
    self.execute_script(r'''
        document.execCommand("useCSS",false, false);
        document.execCommand("foreColor", false, "%s");
        document.execCommand("useCSS",false, true);
        '''%color)
    pass
def do_color_hilitecolor(self, color):
    '''Set the highlight color (the text background color) of the selection.
    '''
    # Setting the background color has no effect without toggling the
    # useCSS option first.
    #print 'WebKitEdit.do_color_hilitecolor:'
    self.execute_script(r'''
        document.execCommand("useCSS",false, false);
        document.execCommand("hilitecolor", false, "%s");
        document.execCommand("useCSS",false, true);
        '''%color)
    pass
def do_removeformat(self, *args):
    '''Clear all formatting from the selection.
    '''
    #print 'WebKitEdit.do_removeformat:'
    self.execute_script('''
        document.execCommand("removeformat", false, null); ''')
    pass
def do_justifyleft(self, *args):
    '''Align left.
    '''
    #print 'WebKitEdit.do_justifyleft:'
    self.execute_script('''
        document.execCommand("justifyleft", false, null); ''')
    pass
def do_justifycenter(self, *args):
    '''Center the text.
    '''
    #print 'WebKitEdit.do_justifycenter:'
    self.execute_script('''
        document.execCommand("justifycenter", false, null); ''')
    pass
def do_justifyfull(self, *args):
    '''Justify (full alignment).
    '''
    #print 'WebKitEdit.do_justifycenter:'
    self.execute_script('''
        document.execCommand("justifyfull", false, null); ''')
    pass
def do_justifyright(self, *args):
    '''Align right.
    '''
    #print 'WebKitEdit.do_justifyright:'
    self.execute_script('''
        document.execCommand("justifyright", false, null); ''')
    pass
def do_indent(self, *args):
    '''Increase the indentation.
    '''
    #print 'WebKitEdit.do_indent:'
    self.execute_script('''
        document.execCommand("indent", false, null); ''')
    pass
def do_outdent(self, *args):
    '''Decrease the indentation.
    '''
    #print 'WebKitEdit.do_outdent:'
    self.execute_script('''
        document.execCommand("outdent", false, null); ''')
    pass
def do_subscript(self, *args):
    '''Make the selection subscript.
    '''
    #print 'WebKitEdit.do_subscript:'
    self.execute_script('''
        document.execCommand("subscript", false, null); ''')
    pass
def do_superscript(self, *args):
    '''Make the selection superscript.
    '''
    #print 'WebKitEdit.do_subperscript:'
    self.execute_script('''
        document.execCommand("superscript", false, null); ''')
    pass
##
def do_find_text(self, findtext):
    '''Search forward for *findtext* (case-insensitive, wrapping).
    '''
    self.search_text(findtext, case_sensitive=0, forward=1, wrap=1)
    pass
def do_find_text_backward(self, findtext):
    '''Search upward (backward) for *findtext* (case-insensitive, wrapping).
    '''
    #print 'WebKitEdit.do_find_text_forward:', findtext,
    self.search_text(findtext, case_sensitive=0, forward=0, wrap=1)
    pass
def do_replace_text(self, findtext, replacetext):
    '''Replace one occurrence of *findtext* with *replacetext*.

    If text is currently selected, the selection is replaced directly;
    otherwise the next forward match is selected and replaced.
    '''
    #print 'WebKitEdit.do_replace_text:'
    if self.eval("document.getSelection().toString();"):
        self.do_insert_text(replacetext)
        return
    elif self.search_text(findtext, case_sensitive=0, forward=1, wrap=1):
        self.do_insert_text(replacetext)
        pass
    pass
def do_replace_text_all(self, findtext, replacetext):
    '''Replace every occurrence of *findtext* with *replacetext*.
    '''
    #print 'WebKitEdit.do_replace_text_all'
    #
    ## Walk back to the top of the page, replacing matches above the cursor.
    while self.search_text(findtext, case_sensitive=0, forward=0, wrap=0):
        self.do_insert_text(replacetext)
        pass
    ## Then search downward and replace the remaining matches.
    while self.search_text(findtext, case_sensitive=0, forward=1, wrap=0):
        self.do_insert_text(replacetext)
        pass
    return
if __name__=="__main__":
    #print 'WebKitEdit.main'
    # Manual test harness: show a lone WebKitEdit widget in a bare GTK window.
    w=gtk.Window()
    w.connect("delete_event", gtk.main_quit)
    m=WebKitEdit()
    w.add(m)
    w.show_all()
    gtk.main()
| Python |
#!/usr/bin/python
# -*- coding: UTF-8 -*-
# vim:set shiftwidth=4 tabstop=4 expandtab textwidth=79:
# Author: Huang Jiahua <jhuangjiahua@gmail.com>
# License: GNU LGPL
# Last modified:
"""Launcher stub: starts the GWrite application when run as a script.
"""
__revision__ = '0.1'

if __name__=="__main__":
    import gwrite.gwrite
    gwrite.gwrite.main()
| Python |
#!/usr/bin/python
"""Distutils packaging script for GWrite (uses DistUtilsExtra for i18n builds)."""
from distutils.core import setup
from DistUtilsExtra.command import *
from glob import glob

# TYPO FIXES in the user-visible long_description below:
# "base on" -> "based on", "Orderet" -> "Ordered",
# "from from" -> "from", "formated" -> "formatted".
setup(name='gwrite',
      version='0.5.1',
      description='HTML5 Doc Writer based on GTK2',
      long_description="""GWrite is a simple HTML5 Doc Writer based on Gtk2.
Features include:
1. HTML Format
2. Indexes and Tables
3. Headings order processor
4. Hyper links
5. Images resize / scale
6. Base64 URL scheme inline images
7. Font size and styles
8. undo/redo
9. Inline pixel margin setting on paragraph / span / block attributes
10. Bullet list / Ordered list
11. Paste image direct from other application
12. Paste html direct from firefox browser image included
13. Paste excel formatted table section copy from openoffice
14. Paste full html page direct from browser image included
""",
      author='Jiahua Huang',
      author_email='jhuangjiahua@gmail.com',
      license='LGPL-3',
      url="http://code.google.com/p/gwrite",
      download_url="http://code.google.com/p/gwrite/downloads/list",
      platforms=['Linux'],
      scripts=['scripts/gwrite'],
      packages=['gwrite'],
      package_data={'gwrite': ['icons/*']},
      data_files=[
          ('share/pixmaps', ['gwrite.png']),
      ],
      # NOTE(review): 'include_data_files' is not a standard distutils
      # option (distutils warns "Unknown distribution option") — confirm
      # whether DistUtilsExtra actually consumes it.
      include_data_files=[
          ('.', ['po']),
      ],
      cmdclass={"build": build_extra.build_extra,
                "build_i18n": build_i18n.build_i18n,
                }
      )
| Python |
# BUG FIX: the first line read "sfrom sqlalchemy import ..." (stray
# leading "s"), which is a SyntaxError and made the module unimportable.
from sqlalchemy import Column, Integer, String, ForeignKey
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import backref, mapper, relation, sessionmaker

# Database module
# NOTE(review): this imported Wrapper is shadowed by the Wrapper class
# defined later in this file — confirm which definition is intended.
from wrapper import Wrapper

Base = declarative_base()
class Wrapper(object):
    """Delegating proxy that forwards attribute access to a wrapped object.

    Subclasses can blacklist attribute names via the ``_ignore_read`` /
    ``_ignore_write`` class lists; touching those names raises
    AttributeError instead of being forwarded.
    """
    _ignore_read = []   # attribute names that may not be read through the proxy
    _ignore_write = []  # attribute names that may not be written through the proxy

    def __init__(self, obj):
        # Write through __dict__ directly so __setattr__ does not try to
        # forward '_obj' to the (not yet existing) wrapped object.
        self.__dict__['_obj'] = obj

    def __getattr__(self, key):
        # __getattr__ only fires after normal lookup fails, so this first
        # branch is defensive; kept to preserve the original structure.
        if key in self.__dict__:
            # BUG FIX: was ``self.__dict[key]`` — name mangling turned that
            # into a lookup of '_Wrapper__dict', raising AttributeError.
            return self.__dict__[key]
        elif key not in self._ignore_read:
            return getattr(self._obj, key)
        else:
            # Parenthesized raise works on both Python 2 and 3
            # (the old ``raise AttributeError, key`` form is py2-only).
            raise AttributeError(key)

    def __setattr__(self, key, value):
        if key in self.__dict__:
            object.__setattr__(self, key, value)
        elif key not in self._ignore_write:
            setattr(self._obj, key, value)
        else:
            raise AttributeError(key)
class _User(Base):
    # ORM row for the "users" table.
    __tablename__ = "users"
    id = Column(Integer, primary_key=True)
    name = Column(String(40))
    age = Column(Integer)
    password = Column(String(40))

    def __init__(self, name, age, password=''):
        # NOTE(review): relies on a module-level ``session`` that is not
        # defined in this module (Database keeps it as self._session), so
        # constructing a _User raises NameError as written — confirm intent.
        self.name = name
        self.age = age
        self.password = password
        session.add(self)
        session.commit()

    def __repr__(self):
        return "<User('%s','%s', '%s')>" % (self.name, self.age, self.password)
class UserInterface(Wrapper):
    """Public facade over ``_User``: password is unreadable, name unwritable."""
    _ignore_read = ['password']
    _ignore_write = ['name']

    @staticmethod
    def from_user(cls, row):
        # Build an interface object of class *cls* from an existing row.
        # NOTE(review): this looks like it was meant to be a @classmethod;
        # as a staticmethod the caller must pass the class explicitly.
        return cls(row.name, row.age, row.password)

    def __init__(self, name, age, password):
        # BUG FIX: ``super(Wrapper, self).__init__(...)`` skipped
        # Wrapper.__init__ entirely (it resolved to object.__init__), so
        # self._obj was never set and every delegated access failed.
        super(UserInterface, self).__init__(_User(name, age, password))
class Database:
    """Thin wrapper around a SQLAlchemy engine/session for the users table."""

    def __init__(self, dbtype, username, password, hostname, port, dbname):
        """Create the engine, (re)create the schema and open a session.

        NOTE(review): drop_all() wipes the existing tables on every
        construction — confirm this destructive behavior is intended.
        """
        # BUG FIX: the original signature was missing ``self``, so this
        # method could never be called as a constructor.
        db = create_engine("%s://%s:%s@%s:%s/%s" % (dbtype, username, password, hostname, port, dbname))
        db.echo = False
        # BUG FIX: referenced the undefined name ``User``; the mapped class
        # in this module is ``_User``. (The table object itself was unused.)
        metadata = Base.metadata
        metadata.drop_all(db, checkfirst=True)
        metadata.create_all(db)
        Session = sessionmaker(bind=db)
        self._session = Session()

    #control can get all the profiles dont know why it would need it
    def getusers(self, sort=''):
        """Return all user rows. ``sort`` is accepted but currently unused."""
        return self._session.query(_User).all()

    #control can get used based on name (so they can populate a profile)
    def getuser(self, name):
        """Return the first user whose name equals *name*, or None.

        query(_User, _User.name == name) yields (row, matched) pairs, so
        filter on the boolean column of each tuple.
        """
        all_users = self._session.query(_User, _User.name == name)
        for user, matched in all_users:
            if matched:
                return user
#control can get user based on name (so they can populate a profile)
# NOTE(review): duplicate of Database.getuser; it references module-level
# ``session`` and ``User`` which are not defined in this module, so calling
# it raises NameError — confirm whether this is dead code to remove.
def getuser(name):
    all_users = session.query(User, User.name == name)
    for user in all_users:
        if user[1] == True:
            return user[0]
#internal use - to print a table
# Debug helper: dumps a result-row list; assumes rs is non-empty and all
# rows come from the same table (uses rs[0] for the table name).
def _printdbrows(rs):
    print "Table %s:" %(rs[0].__tablename__)
    for row in rs:
        print "    ", row
| Python |
#! /usr/bin/env python
class Wrapper(object):
    """Attribute-forwarding proxy around an arbitrary object.

    ``_ignore_read`` / ``_ignore_write`` let subclasses block reads or
    writes of specific attribute names (AttributeError is raised instead
    of delegating to the wrapped object).
    """
    _ignore_read = []   # names that may not be read via the proxy
    _ignore_write = []  # names that may not be written via the proxy

    def __init__(self, obj):
        # Bypass __setattr__ so '_obj' is stored on the proxy itself.
        self.__dict__['_obj'] = obj

    def __getattr__(self, key):
        # Only invoked when normal lookup misses; first branch is defensive.
        if key in self.__dict__:
            # BUG FIX: was ``self.__dict[key]`` — a name-mangled typo that
            # raised AttributeError instead of returning the value.
            return self.__dict__[key]
        elif key not in self._ignore_read:
            return getattr(self._obj, key)
        else:
            # Parenthesized raise is valid on both Python 2 and 3.
            raise AttributeError(key)

    def __setattr__(self, key, value):
        if key in self.__dict__:
            object.__setattr__(self, key, value)
        elif key not in self._ignore_write:
            setattr(self._obj, key, value)
        else:
            raise AttributeError(key)
"""
class Profile(Wrapper):
_ignore_read = ['password']
_ignore_write = ['username']
_remap_read = {'pname':'name'}
_remap_write = {'pname':'name'}
def __init__(self, username, password):
row = ProfileTable(username, password)
super(Wrapper, self).__init__(row)
"""
| Python |
from sqlalchemy import *
from sqlalchemy.orm import *

# Connection settings for a local MySQL test database.
hostname = "127.0.0.1"
username = "userpython"
password = "python"
dbname = "test"

db = create_engine("mysql://%s:%s@%s/%s" %(username, password, hostname, dbname))
db.echo = True  # log every generated SQL statement

metadata = MetaData(db)
users = Table('users', metadata,
    Column('user_id', Integer, primary_key=True),
    Column('name', String(40)),
    Column('age', Integer),
    Column('password', String(40)),
)

# Table creation is left disabled; assumes "users" already exists.
#if users.exists():
#    users.drop()
#users.create()

i = users.insert()
i.execute(name='Mary', age=30, password='secret')
# Batch insert: rows without a password rely on the column's NULL default.
i.execute({'name': 'John', 'age': 42},
          {'name': 'Susan', 'age': 57},
          {'name': 'Carl', 'age': 33})

s = users.select()
rs = s.execute()

# Demonstrates the four equivalent ways to read a column from a result row:
# positional index, string key, attribute, and Column object.
row = rs.fetchone()
print 'Id=%s, Name=%s, Age=%s, Password=%s' % \
    (row[0], row['name'], row.age, row[users.c.password])

for row in rs:
    print row.name, 'is', row.age, 'years old'
# The users table already exists, so no need to redefine it. Just
# load it from the database using the "autoload" feature.
#users = Table('users', metadata, autoload=True)
#
#def run(stmt):
# rs = stmt.execute()
# for row in rs:
# print " %s"%row
#
## Most WHERE clauses can be constructed via normal comparisons
#print "select users.c.name = John"
#s = users.select(users.c.name == 'John')
#run(s)
#print "select users.c.age < 40"
#s = users.select(users.c.age < 40)
#run(s)
#
#print
#
## Python keywords like "and", "or", and "not" can't be overloaded, so
## SQLAlchemy uses functions instead
#s = users.select(and_(users.c.age < 40, users.c.name != 'Mary'))
#run(s)
#s = users.select(or_(users.c.age < 40, users.c.name != 'Mary'))
#run(s)
#s = users.select(not_(users.c.name == 'Susan'))
#run(s)
#
#print
#
## Or you could use &, | and ~ -- but watch out for priority!
#s = users.select((users.c.age < 40) & (users.c.name != 'Mary'))
#run(s)
#s = users.select((users.c.age < 40) | (users.c.name != 'Mary'))
#run(s)
#s = users.select(~(users.c.name == 'Susan'))
#run(s)
#
#print
#
## There's other functions too, such as "like", "startswith", "endswith"
#s = users.select(users.c.name.startswith('M'))
#run(s)
#s = users.select(users.c.name.like('%a%'))
#run(s)
#s = users.select(users.c.name.endswith('n'))
#run(s)
#
#print
#
## The "in" and "between" operations are also available
#s = users.select(users.c.age.between(30,39))
#run(s)
## Extra underscore after "in" to avoid conflict with Python keyword
#s = users.select(users.c.name.in_('Mary' 'Susan'))
#run(s)
#
#print
#
## If you want to call an SQL function, use "func"
#s = users.select(func.substr(users.c.name, 2, 1) == 'a')
#run(s)
#
#print
#
## You don't have to call select() on a table; it's got a bare form
#s = select([users], users.c.name != 'Carl')
#run(s)
#s = select([users.c.name, users.c.age], users.c.name != 'Carl')
#run(s)
#
#print
#
## This can be handy for things like count()
#s = select([func.count(users.c.user_id)])
#run(s)
## Here's how to do count(*)
#s = select([func.count("*")], from_obj=[users])
#run(s)
#
| Python |
import dbtest

# Connection parameters for the shared test database.
dbtype = "mysql"
hostname = "db4free.net"
username = "userpython"
password = "python"
port = "3306"
dbname = "groupsixdatabase"

# NOTE(review): this script expects dbtest to expose init(), User,
# getuser() and printdbrows(); the database module in this project defines
# Database/_User/_printdbrows instead — confirm the intended public API.
success = dbtest.init(dbtype, username, password, hostname, port, dbname)

# Creating a User persists it immediately (the model commits in __init__).
john = dbtest.User('John', 42, 'pass')
mary = dbtest.User(name='Mary', age=30, password='secret')
jack = dbtest.User('Jack', 53)
tom = dbtest.User('Tom', 12, 'word')

#this gets modified on the database
john.name = 'Jon'

#To modify a age using the name for lookup
dbtest.getuser(name='Mary').age = 56

# get all the user info with name for lookup (useful for populating the user profile)
print "name=Tom: id=%d age=%d pass=%s" %(\
    dbtest.getuser(name='Tom').id,
    dbtest.getuser(name='Tom').age,
    dbtest.getuser(name='Tom').password )

#print the table (internal use only)
dbtest.printdbrows(dbtest.getusers())
# Note: this executes in the global scope
# Generates SQLAlchemy declarative classes for every table in the target
# MySQL database by parsing DESCRIBE output, then exec()s the source.
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, String, Integer
import sqlalchemy

engine=sqlalchemy.create_engine('mysql://localhost/test')

debug=False # set this to true and only the generated code is printed.

# Maps MySQL column types (as reported by DESCRIBE) to SQLAlchemy type names.
MYSQL_TO_ALCHEMY_TYPE = dict(
    char='String',
    int='Integer')

Base = declarative_base()

# Source template for each generated declarative class (tab-indented body).
CLASS_DEF =\
"""
class %s(Base):
\t__tablename__ = '%s'
"""

for tablename in engine.table_names():
    cls_name = tablename.capitalize()
    definition = CLASS_DEF % (cls_name, tablename)
    schema = engine.execute('DESCRIBE %s' % tablename)
    for fname, mtype, null, key, default, extra in schema:
        # Extract the display length from e.g. "char(32)".
        # NOTE(review): assumes every column type carries a "(len)" suffix
        # and has a mapping entry; other types would raise here — confirm
        # the schema only uses char/int columns.
        mlen = int(mtype[mtype.index('(')+1:mtype.index(')')])
        # NOTE(review): %s leaves string defaults unquoted in the generated
        # source — confirm defaults are only NULL/numeric in this schema.
        definition += "\t%s=Column(%s(%d), primary_key=%s, nullable=%s, default=%s)\n" % (
            fname, MYSQL_TO_ALCHEMY_TYPE[mtype[:mtype.index('(')]],
            mlen, True if key == 'PRI' else False,
            False if null == 'NO' else True, default)
    if debug:
        print definition
    else:
        exec(definition)

Base.metadata.create_all(engine)

#SHOW_DESCRIBE = r"^\s*\|\s*(?P<field>[a-zA-z0-9]+)\s*\|\s*"\
#    r"(?P<type>[a-zA-Z0-9]+)\((?P<len>[0-9]+)\)\s*\|\s*"\
#    r"(?P<null> NO|YES)\s*\|\s*(?P<key>[0-9a-zA-Z]*)\s*\|\s*"\
#    r"(?P<default>[0-9a-zA-Z]+)\s*\|\s*(?P<extra> [0-9a-zA-Z]*)\s*\|$"
| Python |
import os
from google.appengine.ext.webapp import template
import cgi
from google.appengine.ext.webapp.util import login_required
from google.appengine.api import users
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
from google.appengine.ext import db
from google.appengine.api import mail
import config
class EmailHandler(webapp.RequestHandler):
    """POST handler that relays a form submission as email via the GAE mail API."""
    def post(self):
        # Shared-secret check keeps this relay from being an open endpoint.
        secret_code = self.request.get("secret")
        if secret_code != config.SECRET_CODE:
            self.response.out.write('fail1')
            return
        to_addr = self.request.get("to")
        from_addr = self.request.get("from")
        body = self.request.get("message")
        subject = self.request.get("subject")
        if not mail.is_email_valid(to_addr):
            # Return an error message...
            self.response.out.write('fail2')
            return
        # NOTE(review): from_addr is not validated; App Engine requires the
        # sender to be an authorized address or send() will fail — confirm.
        message = mail.EmailMessage()
        message.sender = from_addr
        message.to = to_addr
        message.subject = subject
        message.body = body
        message.send()
        #self.response.out.write('success')
        # Render the static confirmation page on success.
        template_values = {}
        path = os.path.join(os.path.dirname(__file__), 'success.html')
        self.response.out.write(template.render(path, template_values))
class MainPage(webapp.RequestHandler):
    """Serves the static compose form (index.html)."""
    def get(self):
        template_values = {}
        path = os.path.join(os.path.dirname(__file__), 'index.html')
        self.response.out.write(template.render(path, template_values))
# URL routing for the two handlers above.
application = webapp.WSGIApplication(
    [
        ('/', MainPage),
        ('/email', EmailHandler),
    ],
    debug=True)

def main():
    # Entry point used by the App Engine Python runtime.
    run_wsgi_app(application)

if __name__ == "__main__":
    main()
| Python |
# Shared secret required in the "secret" POST field (checked by EmailHandler).
SECRET_CODE = 'whateveryourcodeis'
| Python |
#!/usr/bin/env python
# Standard pre-Django-1.4 management script (execute_manager style).
from django.core.management import execute_manager
try:
    import settings # Assumed to be in the same directory.
except ImportError:
    import sys
    sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n(If the file settings.py does indeed exist, it's causing an ImportError somehow.)\n" % __file__)
    sys.exit(1)

if __name__ == "__main__":
    execute_manager(settings)
| Python |
# -*- coding: utf-8 -*-
'''
Copyright Cobalys.com (c) 2011
This file is part of 365Video.
365Video is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
365Video is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with 365Video. If not, see <http://www.gnu.org/licenses/>.
'''
from django.core.paginator import Paginator
def paginator_simple(items_list, max_results, page):
    """Paginate *items_list* with *max_results* items per page.

    Returns the requested django Page object. Falls back to page 1 when
    *page* is not a valid integer, and to the last page when it is out of
    range.
    """
    paginator = Paginator(items_list, max_results)
    try:
        page = int(page)
    except (TypeError, ValueError):
        # BUG FIX: was a bare ``except:``. Only a failed int() conversion
        # should send the caller to page 1.
        page = 1
    try:
        result_paginated = paginator.page(page)
    except Exception:
        # BUG FIX: was a bare ``except:``. Out-of-range pages (EmptyPage /
        # PageNotAnInteger) fall back to the last page without swallowing
        # KeyboardInterrupt/SystemExit.
        result_paginated = paginator.page(paginator.num_pages)
    return result_paginated
def paginator_numeric(items, max_results, page):
    """Paginate *items* and build a numeric page-range with '...' gaps.

    Returns ``(variables, items)`` where *variables* carries
    'total_pages' and 'page_range' (a list of page numbers and '...'
    separators) and *items* is the requested Page (last page when the
    requested one is out of range).
    """
    variables = dict()
    paginator = Paginator(items, max_results)
    page = int(page)
    page_range = list()
    try:
        items = paginator.page(int(page))
    except Exception:
        # BUG FIX: was a bare ``except:`` — keep the last-page fallback but
        # stop swallowing KeyboardInterrupt/SystemExit.
        items = paginator.page(paginator.num_pages)
    if paginator.num_pages > 15:
        last_page = paginator.num_pages
        if page <= 6:
            # Near the start: 1..page+k, a gap, then the last three pages.
            places = (7 - page) + 1
            page_range.extend(range(1, page+places))
            page_range.append('...')
            page_range.extend(range(paginator.num_pages-2, last_page+1))
        elif page >= last_page - 4:
            # Near the end: first three pages, a gap, then a window to the end.
            places = 6 - (last_page - page)
            page_range.extend(range(1, 4))
            page_range.append('...')
            page_range.extend(range(page - places, last_page+1))
        else:
            # Middle: 1-3, gap, page-1..page+1, gap, last three pages.
            page_range.extend(range(1, 4))
            page_range.append('...')
            page_range.extend(range(page - 1, page + 2))
            page_range.append('...')
            page_range.extend(range(paginator.num_pages-2, last_page+1))
    else:
        # Few enough pages to list them all.
        page_range.extend(range(1, paginator.num_pages+1))
    variables['total_pages'] = paginator.num_pages
    variables['page_range'] = page_range
    return variables, items
| Python |
# -*- coding: utf-8 -*-
'''
Copyright Cobalys.com (c) 2011
This file is part of 365Video.
365Video is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
365Video is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with 365Video. If not, see <http://www.gnu.org/licenses/>.
'''
from django.utils.translation import ugettext as _
import datetime
def get_month_name(month):
    """Return the localized month name for *month* (1-12).

    Raises KeyError for values outside 1-12, like the original dict lookup.
    """
    names = (
        _('January'), _('February'), _('March'), _('April'),
        _('May'), _('June'), _('July'), _('August'),
        _('September'), _('October'), _('November'), _('December'),
    )
    month_names = dict(enumerate(names, 1))
    return month_names[month]
def get_day_name(year, month, day):
    """Return the localized weekday name for the given calendar date."""
    names = (
        _('Monday'), _('Tuesday'), _('Wednesday'), _('Thursday'),
        _('Friday'), _('Saturday'), _('Sunday'),
    )
    # date.weekday() is always in 0 (Monday) .. 6 (Sunday), so the
    # tuple index is safe.
    return names[datetime.date(year, month, day).weekday()]
| Python |
# -*- coding: utf-8 -*-
'''
Copyright Cobalys.com (c) 2011
This file is part of 365Video.
365Video is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
365Video is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with 365Video. If not, see <http://www.gnu.org/licenses/>.
'''
from django.conf import settings
from django.db import connection
from django.template.defaultfilters import slugify
from django.utils.translation import ugettext as _
from video365.apps.tag.models import Tag
from video365.helpers.date_utils import get_month_name
import json
def generate_date_menu():
    """Render the per-month archive sidebar to GENERATOR_DIR/date_menu.inc.

    Counts published (unlocked, enabled) video posts grouped by year/month
    using raw SQL (MySQL-specific MONTH()/YEAR()), then emits nested HTML:
    a collapsed <h3> per year containing one line per month.
    """
    cursor = connection.cursor()
    cursor.execute('SELECT COUNT(*) AS total, MONTH(publication_date) AS month, YEAR(publication_date) AS year FROM videopost_videopost v WHERE locked = 0 AND enabled = 1 GROUP BY YEAR(publication_date), MONTH(publication_date) ORDER BY year DESC, month')
    line = "<div class='sidebar-module'>"
    line += "<p id='date-sidebar-title' class='sidebar-module-title'>" + _("Archive") + "</p>"
    line += "<div>"
    iteration_year = None  # year whose month container <div> is currently open
    for row in cursor.fetchall():
        total = row[0]
        month_number = row[1]
        year_number = row[2]
        month_name = get_month_name(month_number)
        if iteration_year != year_number:
            # Year changed: close the previous year's month container
            # (if any) and open a collapsed section for the new year.
            if iteration_year != None:
                line += "</div>"
            line += "<h3 class='date-sidebar-year date-sidebar-year-collapsed'><a href='#'/>"
            line += str(year_number)
            line += "</a></h3>"
            line += "<div class='month-date-sidebar'>"
            iteration_year = year_number
        line += "<p class='date-sidebar-month'><a href='%sdate/1/%d/%d/'>" % (settings.APP_PATH, year_number, month_number)
        line += "%s (%d)" % (month_name, total)
        line += "</a></p>"
    line += "</div>"
    line += "</div>"
    line += "</div>"
    line = line.encode("utf-8")
    try:
        gendir = '%s/date_menu.inc' % settings.GENERATOR_DIR
        f = open(gendir, "w")
        try:
            f.write(line)
        finally:
            f.close()
    except IOError:
        # Best effort: a missing/unwritable generator dir is ignored.
        pass
def generate_tag_files():
    """Render the tag sidebar to GENERATOR_DIR/tag_menu.inc.

    Counts published posts per tag via raw SQL; writes an empty file when
    there are no tagged posts.
    """
    cursor = connection.cursor()
    cursor.execute('SELECT COUNT(1), tag_tag.id, tag_tag.name FROM tag_tag, videopost_videopost, videopost_videopost_tags WHERE videopost_videopost.id = videopost_videopost_tags.videopost_id AND tag_tag.id = videopost_videopost_tags.tag_id AND videopost_videopost.locked = 0 AND videopost_videopost.enabled = 1 GROUP BY tag_tag.id')
    if cursor.rowcount > 0:
        line = "<div class='sidebar-module'>"
        line += "<p id='tag-sidebar-title' class='sidebar-module-title'>" + _("Tags") + "</p>"
        line += "<div>"
        for row in cursor.fetchall():
            count = row[0]
            tag_id = row[1]
            tag_name = row[2]
            tag_name_slug = slugify(tag_name)
            line += "<p class='tag-sidebar-item'>"
            line += "<a href='%stag/%d/%s.html'>%s (%d)</a>" % (settings.APP_PATH, tag_id, tag_name_slug, tag_name, count)
            line += "</p>"
        line += "</div>"
        line += "</div>"
    else:
        line = ""
    line = line.encode("utf-8")
    try:
        gendir = '%s/tag_menu.inc' % settings.GENERATOR_DIR
        f = open(gendir, "w")
        try:
            f.write(line)
        finally:
            f.close()
    except IOError:
        # Best effort: ignore an unwritable generator directory.
        pass
def generate_tag_js():
    """Dump every tag name as a JS array literal to GENERATOR_DIR/js_tags.inc.

    Output shape: ``availableTags = [...];`` (consumed by the tag
    autocomplete widget).
    """
    # Comprehension replaces the manual append loop.
    list_tags = [tag.name for tag in Tag.objects.all()]
    line = "availableTags = " + json.dumps(list_tags) + ";"
    line = line.encode("utf-8")
    try:
        gendir = '%s/js_tags.inc' % settings.GENERATOR_DIR
        # Context manager replaces the manual try/finally close.
        with open(gendir, "w") as f:
            f.write(line)
    except IOError:
        # Best effort: ignore an unwritable generator directory.
        pass
| Python |
Subsets and Splits
SQL Console for ajibawa-2023/Python-Code-Large
Provides a useful breakdown of language distribution in the training data, showing which languages have the most samples and helping identify potential imbalances across different language groups.