code stringlengths 1 1.49M | vector listlengths 0 7.38k | snippet listlengths 0 7.38k |
|---|---|---|
""" Functions to get the initial data for the shock tube problems """
import numpy
import pysph.base.api as base
def get_shock_tube_data(nl, nr, xl, xr,
                        pl, pr, rhol, rhor, ul, ur,
                        g1, g2, h0, gamma=1.4,
                        m0=None):
    """ Create the fluid and boundary particle arrays for a 1D shock tube.

    Parameters:
    -----------
    nl, nr : number of fluid particles left/right of the diaphragm (x=0)
    xl, xr : domain extents (xl < 0 < xr)
    pl, pr : initial pressure on the left/right
    rhol, rhor : initial density on the left/right
    ul, ur : initial velocity on the left/right
    g1, g2 : artificial heat coefficients.  Only g1 enters the
             conduction coefficient q = g1*h*cs here; g2 is accepted
             for interface compatibility with the callers.
    h0 : initial smoothing length
    gamma : ratio of specific heats
    m0 : particle mass; defaults to the left spacing dxl when None

    Returns:
    --------
    adke, left, right : the fluid array and the left/right boundary
                        particle arrays (100 particles each).
    """
    dxl = numpy.abs(xl)/nl
    dxr = numpy.abs(xr)/nr

    # fluid positions: spacing dxl on [xl, 0) and dxr on (0, xr].  The
    # 1e-10 guards against floating point round-off dropping the last
    # point from numpy.arange.
    x = numpy.ones( nl + nr )
    x[:nl] = numpy.arange( xl, -dxl+1e-10, dxl )
    x[nl:] = numpy.arange( dxr, +xr+1e-10, dxr )

    # piecewise-constant initial state across the diaphragm
    p = numpy.ones_like(x)
    p[:nl] = pl
    p[nl:] = pr

    rho = numpy.ones_like(x)
    rho[:nl] = rhol
    rho[nl:] = rhor

    u = numpy.ones_like(x)
    u[:nl] = ul
    u[nl:] = ur

    # thermal energy and sound speed from the ideal gas EOS
    e = p/( (gamma-1)*rho )
    cs = numpy.sqrt( gamma*p/rho )

    # NOTE: test `m0 is None` rather than truthiness so that an
    # explicit m0=0.0 is honoured instead of silently replaced.
    if m0 is None:
        m = numpy.ones_like(x) * dxl
    else:
        m = numpy.ones_like(x) * m0

    h = numpy.ones_like(x) * h0

    # Extra properties for the ADKE procedure
    rhop = numpy.ones_like(x)
    div = numpy.ones_like(x)
    q = g1 * h * cs

    adke = base.get_particle_array(name="fluid", x=x, m=m, rho=rho, h=h,
                                   u=u, p=p, e=e, cs=cs,
                                   rhop=rhop, div=div, q=q)

    # boundary particles continue the end states outwards on either
    # side.  Both boundaries historically use the *left* spacing dxl
    # for the default mass; that behaviour is preserved.
    nbp = 100
    left = _make_shock_tube_boundary("left", xl, -dxl, nbp, m0, dxl,
                                     h0, rhol, pl, gamma, g1)
    right = _make_shock_tube_boundary("right", xr, dxr, nbp, m0, dxl,
                                      h0, rhor, pr, gamma, g1)

    return adke, left, right

def _make_shock_tube_boundary(name, x0, dx, nbp, m0, dxl, h0,
                              rho0, p0, gamma, g1):
    """ Create one boundary particle array for the shock tube.

    `nbp` particles are laid outwards from `x0` with signed spacing
    `dx`.  The particles are at rest (the original expression
    numpy.zeros_like(x)*u0 was identically zero).
    """
    x = x0 + dx * numpy.arange(1, nbp + 1)

    if m0 is None:
        m = numpy.ones_like(x) * dxl
    else:
        m = numpy.ones_like(x) * m0

    h = numpy.ones_like(x) * h0
    u = numpy.zeros_like(x)
    rho = numpy.ones_like(x) * rho0
    p = numpy.ones_like(x) * p0

    e = p/( (gamma-1) * rho )
    cs = numpy.sqrt( gamma * p/rho )
    q = g1 * h * cs

    return base.get_particle_array(name=name, x=x, m=m, h=h, u=u,
                                   type=base.Boundary,
                                   rho=rho, p=p, e=e, cs=cs, q=q)
| [
[
8,
0,
0.0099,
0.0099,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.0297,
0.0099,
0,
0.66,
0.3333,
954,
0,
1,
0,
0,
954,
0,
0
],
[
1,
0,
0.0495,
0.0099,
0,
0.66... | [
"\"\"\" Functions to get the initial data for the shock tube problems \"\"\"",
"import numpy",
"import pysph.base.api as base",
"def get_shock_tube_data(nl, nr, xl, xr,\n pl, pr, rhol, rhor, ul, ur,\n g1, g2, h0, gamma=1.4,\n m0=None):\n\n ... |
""" An example script for running the shock tube problem using Standard
SPH.
Global properties for the shock tube problem:
---------------------------------------------
x ~ [-.6,.6], dxl = 0.001875, dxr = dxl*4, m = dxl, h = 2*dxr
rhol = 1.0, rhor = 0.25, el = 2.5, er = 1.795, pl = 1.0, pr = 0.1795
These are obtained from the solver.shock_tube_solver.standard_shock_tube_data
"""
import logging
import pysph.base.api as base
import pysph.solver.api as solver
from pysph.base.kernels import CubicSplineKernel
CLDomain = base.DomainManagerType
CLLocator = base.OpenCLNeighborLocatorType
Locator = base.NeighborLocatorType
nl = 320
nr = 80
# Create the application, do this first so the application sets up the
# logging and also gets all command line arguments.
app = solver.Application()
# Set the solver using the default cubic spline kernel
s = solver.ShockTubeSolver(dim=1, integrator_type=solver.EulerIntegrator)
# set the default solver constants.
s.set_final_time(0.15)
s.set_time_step(3e-4)
# Set the application's solver. We do this at the end since the user
# may have asked for a different timestep/final time on the command
# line.
app.setup(
solver=s,
variable_h=False,
create_particles=solver.shock_tube_solver.standard_shock_tube_data,
name='fluid', type=0,
locator_type=Locator.SPHNeighborLocator,
cl_locator_type=CLLocator.AllPairNeighborLocator,
domain_manager_type=CLDomain.DomainManager,
nl=nl, nr=nr, smoothing_length=None)
# Run the application.
app.run()
| [
[
8,
0,
0.12,
0.22,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.24,
0.02,
0,
0.66,
0.0667,
715,
0,
1,
0,
0,
715,
0,
0
],
[
1,
0,
0.28,
0.02,
0,
0.66,
0.1333... | [
"\"\"\" An example script for running the shock tube problem using Standard\nSPH.\n\nGlobal properties for the shock tube problem:\n---------------------------------------------\nx ~ [-.6,.6], dxl = 0.001875, dxr = dxl*4, m = dxl, h = 2*dxr\nrhol = 1.0, rhor = 0.25, el = 2.5, er = 1.795, pl = 1.0, pr = 0.1795",
"... |
""" Standard shock tube problem by Monaghan """
import numpy
import pysph.base.api as base
import pysph.solver.api as solver
import get_shock_tube_data as data
CLDomain = base.DomainManagerType
CLLocator = base.OpenCLNeighborLocatorType
Locator = base.NeighborLocatorType
kernel = base.CubicSplineKernel
hks=False
# shock tube parameters
xl = -1.0; xr = 1.0
pl = 1000; pr = 0.01
ul = 0.0; ur = 0.0
rhol = 1.0; rhor = 1.0
# Number of particles
nl = 1000
nr = 1000
np = nl + nr
# Time step constants
dt = 5e-6
tf = 0.0075
t = 0.0
# Artificial Viscosity constants
alpha = 1.0
beta = 1.0
gamma = 1.4
eta = 0.1
# ADKE Constants
eps = 0.5
k=1.0
h0 = 1.5*xr/nr
# Artificial Heat constants
g1 = 0.2
g2 = 0.4
def get_particles(with_boundary=False, **kwargs):
adke, left, right = data.get_shock_tube_data(nl=nl, nr=nr, xl=xl, xr=xr,
pl=pl, pr=pr,
rhol=rhol, rhor=rhor,
ul=ul, ur=ur,
g1=g1, g2=g2, h0=h0,
gamma=gamma)
if with_boundary:
return [adke, left, right]
else:
return [adke,]
app = solver.Application()
s = solver.ADKEShockTubeSolver(dim=1,
integrator_type=solver.RK2Integrator,
h0=h0, eps=eps, k=k, g1=g1, g2=g2,
alpha=alpha, beta=beta,
kernel=kernel, hks=hks)
s.set_final_time(tf)
s.set_time_step(dt)
app.setup(
solver=s,
min_cell_size=4*h0,
variable_h=True,
create_particles=get_particles,
locator_type=Locator.SPHNeighborLocator,
cl_locator_type=CLLocator.AllPairNeighborLocator,
domain_manager_type=CLDomain.DomainManager,
nl=nl, nr=nr)
output_dir = app.options.output_dir
numpy.savez(output_dir + "/parameters.npz", eps=eps, k=k, h0=h0,
g1=g1, g2=g2, alpha=alpha, beta=beta, hks=hks)
app.run()
| [
[
8,
0,
0.0118,
0.0118,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.0353,
0.0118,
0,
0.66,
0.0244,
954,
0,
1,
0,
0,
954,
0,
0
],
[
1,
0,
0.0588,
0.0118,
0,
0.66... | [
"\"\"\" Standard shock tube problem by Monaghan \"\"\"",
"import numpy",
"import pysph.base.api as base",
"import pysph.solver.api as solver",
"import get_shock_tube_data as data",
"CLDomain = base.DomainManagerType",
"CLLocator = base.OpenCLNeighborLocatorType",
"Locator = base.NeighborLocatorType",
... |
""" Cylindrical Noh's implosion problem using the ADKE algorithm.
Particles are distributed on concentric circles about the origin with
increasing number of particles with increasing radius. The velocity is
initially uniform and directed towards the origin.
"""
import numpy
import pysph.sph.api as sp
import pysph.base.api as base
import pysph.solver.api as solver
pi = numpy.pi
cos = numpy.cos
sin = numpy.sin
gamma = 5.0/3.0
alpha = 1.0
beta = 1.0
k = 0.9
eps = 0.4
g1 = 0.5
g2 = 1.0
dt = 1e-4
tf = 0.6
n = 120
dr = 1.0/n
h0 = dr
rho0 = 1.0
m1 = pi*dr*dr*rho0/4
def create_particles(**kwargs):
x = numpy.zeros(0)
y = numpy.zeros(0)
u = numpy.zeros(0)
v = numpy.zeros(0)
m = numpy.zeros(0)
rad = 0.0
for j in range(1, n+1):
npnts = 4*j
dtheta = 2*pi/npnts
theta = numpy.arange(0, 2*pi-1e-10, dtheta)
rad = rad + dr
_x = rad*cos(theta)
_y = rad*sin(theta)
_u = -cos(theta)
_v = -sin(theta)
if j == 1:
_m = numpy.ones_like(_x) * m1
else:
_m = numpy.ones_like(_x) * (2.0*j - 1.0)/(j) * m1
x = numpy.concatenate( (x, _x) )
y = numpy.concatenate( (y, _y) )
m = numpy.concatenate( (m, _m) )
u = numpy.concatenate( (u, _u) )
v = numpy.concatenate( (v, _v) )
rho = numpy.ones_like(x) * 1.0
h = numpy.ones_like(x) * h0
p = numpy.ones_like(x) * 0.0
e = numpy.ones_like(x) * 0.0
rhop = numpy.ones_like(x)
div = numpy.zeros_like(x)
q = numpy.zeros_like(x)
fluid = base.get_particle_array(name="fluid", type=base.Fluid,
x=x,y=y,m=m,rho=rho, h=h,
u=u,v=v,p=p,e=e,
rhop=rhop, q=q, div=div)
print "Number of fluid particles = ", fluid.get_number_of_particles()
return fluid
app = solver.Application()

# ADKE solver with a second-order Runge-Kutta integrator
s = solver.ADKEShockTubeSolver(dim=2,
                               integrator_type=solver.RK2Integrator,
                               h0=h0, eps=eps, k=k, g1=g1, g2=g2,
                               alpha=alpha, beta=beta, gamma=gamma)
s.set_final_time(tf)
s.set_time_step(dt)

# variable-h run; min_cell_size = 4*h0 presumably bounds the cell size
# by the kernel support -- confirm
app.setup(
    solver=s,
    min_cell_size=4*h0,
    variable_h=True,
    create_particles=create_particles)

# record the run parameters next to the simulation output
output_dir = app.options.output_dir
numpy.savez(output_dir + "/parameters", eps=eps, k=k, h0=h0,
            g1=g1, g2=g2, alpha=alpha, beta=beta,
            gamma=gamma, hks=app.options.hks, kernel=app.options.kernel)
app.run()
| [
[
8,
0,
0.037,
0.0648,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.0833,
0.0093,
0,
0.66,
0.0333,
954,
0,
1,
0,
0,
954,
0,
0
],
[
1,
0,
0.0926,
0.0093,
0,
0.66,... | [
"\"\"\" Cylindrical Noh's implosion problem using the ADKE algorithm.\n\nParticles are distributed on concentric circles about the origin with\nincreasing number of particles with increasing radius. The velocity is\ninitially uniform and directed towards the origin.\n\n\"\"\"",
"import numpy",
"import pysph.sph... |
"""Sedov point explosion problem using the ADKE algorithm.
Particles are distributed on concentric circles about the origin with
increasing number of particles with increasing radius. A unit charge
is distributed about the center which gives the initial pressure
disturbance.
"""
import numpy
import pysph.sph.api as sph
import pysph.base.api as base
import pysph.solver.api as solver
pi = numpy.pi
cos = numpy.cos
sin = numpy.sin
gamma=1.4
R = 0.3
n = 110
dr = R/n
alpha=1.0
beta=1.0
g1=1.0
g2=1.0
k=1.0
eps=0.5
h0 = 2*dr
ro = 0.025
rho0 = 1.0
m1 = pi*dr*dr*rho0/10.0
dt = 1e-4
tf = 0.05
def create_particles(**kwargs):
x = numpy.zeros(0)
y = numpy.zeros(0)
p = numpy.zeros(0)
m = numpy.zeros(0)
rad = 0.0
for j in range(1, n+1):
npnts = 10*j
dtheta = 2*pi/npnts
theta = numpy.arange(0, 2*pi-1e-10, dtheta)
rad = rad + dr
_x = rad*cos(theta)
_y = rad*sin(theta)
if j == 1:
_m = numpy.ones_like(_x) * m1
else:
_m = numpy.ones_like(_x) * (2.0*j - 1.0)/(j) * m1
if rad <= ro:
_p = numpy.ones_like(_x) * (gamma-1.0)*1.0/(pi*ro*ro)
else:
_p = numpy.ones_like(_x) * 1e-5
x = numpy.concatenate( (x, _x) )
y = numpy.concatenate( (y, _y) )
m = numpy.concatenate( (m, _m) )
p = numpy.concatenate( (p, _p) )
rho = numpy.ones_like(x) * rho0
h = numpy.ones_like(x) * h0
e = p/( (gamma-1.0)*rho0 )
rhop = numpy.ones_like(x)
div = numpy.zeros_like(x)
q = numpy.zeros_like(x)
fluid = base.get_particle_array(name="fluid", type=base.Fluid,
x=x,y=y,m=m,rho=rho, h=h,
p=p,e=e,
rhop=rhop, q=q, div=div)
print "Number of fluid particles = ", fluid.get_number_of_particles()
return fluid
app = solver.Application()

# ADKE solver with a second-order Runge-Kutta integrator
s = solver.ADKEShockTubeSolver(dim=2,
                               integrator_type=solver.RK2Integrator,
                               h0=h0, eps=eps, k=k, g1=g1, g2=g2,
                               alpha=alpha, beta=beta, gamma=gamma)
s.set_final_time(tf)
s.set_time_step(dt)

# variable-h run; note the larger 6*h0 cell-size floor used here
app.setup(
    solver=s,
    min_cell_size=6*h0,
    variable_h=True,
    create_particles=create_particles)

# record the run parameters next to the simulation output
output_dir = app.options.output_dir
numpy.savez(output_dir + "/parameters", eps=eps, k=k, h0=h0,
            g1=g1, g2=g2, alpha=alpha, beta=beta,
            gamma=gamma, hks=app.options.hks, kernel=app.options.kernel)
app.run()
| [
[
8,
0,
0.0395,
0.0702,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.0877,
0.0088,
0,
0.66,
0.0312,
954,
0,
1,
0,
0,
954,
0,
0
],
[
1,
0,
0.1053,
0.0088,
0,
0.66... | [
"\"\"\"Sedov point explosion problem using the ADKE algorithm.\n\nParticles are distributed on concentric circles about the origin with\nincreasing number of particles with increasing radius. A unit charge\nis distributed about the center which gives the initial pressure\ndisturbance.\n\n\"\"\"",
"import numpy",
... |
""" Shock tube problem with the ADKE procedure of Sigalotti """
import pysph.solver.api as solver
import pysph.base.api as base
import pysph.sph.api as sph
from pysph.base.kernels import CubicSplineKernel
import numpy
Fluid = base.ParticleType.Fluid
Boundary = base.ParticleType.Boundary
# Shock tube parameters
nl = int(320 * 7.5)
nr = int(80 * 7.5)
dxl = 0.6/nl
dxr = 4*dxl
h0 = 2*dxr
eps = 0.4
k = 0.7
g1 = 0.2
g2 = 0.5
alpha = 1.0
beta = 1.0
hks = False
class UpdateBoundaryParticles:
    """ Misc function: every evaluation, copy the smoothing length of
    the first/last fluid particle onto the left/right boundary arrays
    so the boundaries track the adaptive fluid h.
    """
    def __init__(self, particles):
        self.particles = particles

    def eval(self):
        arrays = self.particles
        left = arrays.get_named_particle_array('left')
        right = arrays.get_named_particle_array("right")
        fluid = arrays.get_named_particle_array("fluid")

        # first fluid particle drives the left wall, last the right
        left.h[:] = fluid.h[0]
        right.h[:] = fluid.h[-1]
def get_fluid_particles(**kwargs):
    """ Build the standard shock tube fluid array and attach the extra
    double properties required by the ADKE procedure.
    """
    pa = solver.shock_tube_solver.standard_shock_tube_data(
        name="fluid", nl=nl, nr=nr)

    # pilot density, velocity divergence and conduction coefficient
    for prop in ('rhop', 'div', 'q'):
        pa.add_property({'name': prop, 'type': 'double'})

    return pa
def get_boundary_particles(**kwargs):
    """ Create the left and right boundary particle arrays.

    Fifty particles are laid outwards from either end of the fluid
    domain [-0.6, 0.6] with the corresponding fluid spacing.  Both
    sides use the particle mass dxl, which is consistent with
    m = rho*dx on the right as well since dxr = 4*dxl and rho = 0.25.
    """
    nbp = 50

    # left boundary: continuation of the left fluid state
    x = numpy.ones(nbp)
    for i in range(nbp):
        x[i] = -0.6 - (i+1) * dxl

    m = numpy.ones_like(x) * dxl
    h = numpy.ones_like(x) * 2*dxr
    rho = numpy.ones_like(x)
    u = numpy.zeros_like(x)
    e = numpy.ones_like(x) * 2.5
    p = (0.4) * rho * e
    cs = numpy.sqrt( 1.4*p/rho )
    q = g1 * h * cs

    left = base.get_particle_array(name="left", type=Boundary,
                                   x=x, m=m, h=h, rho=rho, u=u,
                                   e=e, cs=cs, p=p, q=q)

    # right boundary: use a FRESH position array instead of mutating
    # the one already handed to the left array above -- avoids latent
    # aliasing should get_particle_array ever stop copying its inputs.
    x = numpy.ones(nbp)
    for i in range(nbp):
        x[i] = 0.6 + (i + 1)*dxr

    m = numpy.ones_like(x) * dxl
    h = numpy.ones_like(x) * 2*dxr
    rho = numpy.ones_like(x) * 0.25
    u = numpy.zeros_like(x)
    e = numpy.ones_like(x) * 1.795
    p = (0.4) * rho * e
    cs = numpy.sqrt( 1.4*p/rho )
    q = g1 * h * cs

    right = base.get_particle_array(name="right", type=Boundary,
                                    x=x, m=m, h=h, rho=rho, u=u,
                                    e=e, cs=cs,p=p, q=q)

    return [left, right]
def get_particles(**kwargs):
    """ Return the fluid array followed by the two boundary arrays. """
    return [get_fluid_particles()] + get_boundary_particles()
# Create the application
app = solver.Application()

# define the solver and kernel
s = solver.Solver(dim=1, integrator_type=solver.RK2Integrator)

#############################################################
# ADD OPERATIONS
#############################################################
# The ADKE pipeline: reset h, pilot density, adaptive h update,
# summation density, EOS, velocity divergence, conduction coefficient,
# then the integrated momentum/energy equations (presumably executed
# in the order added -- confirm against the Solver implementation).

# set the smoothing length
s.add_operation(solver.SPHOperation(
    sph.SetSmoothingLength.withargs(h0=h0),
    on_types=[base.Fluid,],
    updates=["h"],
    id="setsmoothing")
)

# pilot rho
s.add_operation(solver.SPHOperation(
    sph.ADKEPilotRho.withargs(h0=h0),
    on_types=[Fluid], from_types=[Fluid,Boundary],
    updates=['rhop'], id='adke_rho'),
)

# smoothing length update
s.add_operation(solver.SPHOperation(
    sph.ADKESmoothingUpdate.withargs(h0=h0, k=k, eps=eps, hks=hks),
    on_types=[Fluid], updates=['h'], id='adke'),
)

# summation density
s.add_operation(solver.SPHOperation(
    sph.SPHRho.withargs(hks=hks),
    from_types=[Fluid, Boundary], on_types=[Fluid],
    updates=['rho'], id = 'density')
)

# ideal gas equation
s.add_operation(solver.SPHOperation(
    sph.IdealGasEquation.withargs(),
    on_types = [Fluid], updates=['p', 'cs'], id='eos')
)

# velocity divergence
s.add_operation(solver.SPHOperation(
    sph.VelocityDivergence.withargs(hks=hks),
    on_types=[Fluid], from_types=[Fluid, Boundary],
    updates=['div'], id='vdivergence'),
)

#conduction coefficient update
s.add_operation(solver.SPHOperation(
    sph.ADKEConductionCoeffUpdate.withargs(g1=g1, g2=g2),
    on_types=[Fluid],
    updates=['q'], id='qcoeff'),
)

# momentum equation
s.add_operation(solver.SPHIntegration(
    sph.MomentumEquation.withargs(alpha=1, beta=1, hks=hks),
    from_types=[Fluid, Boundary], on_types=[Fluid],
    updates=['u'], id='mom')
)

# energy equation
s.add_operation(solver.SPHIntegration(
    sph.EnergyEquation.withargs(),
    from_types=[Fluid, Boundary],
    on_types=[Fluid], updates=['e'], id='enr')
)

# artificial heat
s.add_operation(solver.SPHIntegration(
    sph.ArtificialHeat.withargs(eta=0.1),
    on_types=[Fluid], from_types=[Fluid,Boundary],
    updates=['e'], id='aheat'),
)

# position step
s.add_operation_step([Fluid])

s.set_final_time(0.15)
s.set_time_step(3e-4)

app.setup(
    solver=s,
    min_cell_size = 4*h0,
    variable_h=True, create_particles=get_particles,
    locator_type=base.NeighborLocatorType.SPHNeighborLocator
)

# add the boundary update function to the particles
s.particles.add_misc_function( UpdateBoundaryParticles(s.particles) )

# record the run parameters next to the simulation output
output_dir = app.options.output_dir
numpy.savez(output_dir + "/parameters.npz", eps=eps, k=k, h0=h0,
            g1=g1, g2=g2, alpha=alpha, beta=beta, hks=hks)
app.run()
| [
[
8,
0,
0.0044,
0.0044,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.0133,
0.0044,
0,
0.66,
0.0233,
683,
0,
1,
0,
0,
683,
0,
0
],
[
1,
0,
0.0177,
0.0044,
0,
0.66... | [
"\"\"\" Shock tube problem with the ADKE procedure of Sigalotti \"\"\"",
"import pysph.solver.api as solver",
"import pysph.base.api as base",
"import pysph.sph.api as sph",
"from pysph.base.kernels import CubicSplineKernel",
"import numpy",
"Fluid = base.ParticleType.Fluid",
"Boundary = base.Particle... |
""" Strong blaswave problem proposed by Sigalotti. Mach number = 771 """
import numpy
import pysph.base.api as base
import pysph.solver.api as solver
import get_shock_tube_data as data
Locator = base.NeighborLocatorType
kernel = base.CubicSplineKernel
hks=False
# shock tube parameters
xl = -1.5; xr = 1.5
pl = 1e4; pr = 0.01
ul = 0.0; ur = 0.0
rhol = 1.0; rhor = 1.0
# Number of particles
nl = 1500
nr = 1500
np = nl + nr
# Time step constants
dt = 5e-6
tf = 4e-3
t = 0.0
# Artificial Viscosity constants
alpha = 1.0
beta = 1.0
gamma = 5.0/3.0
eta = 0.1
# ADKE Constants
eps = 0.8
k=1.0
dx = xr/nr
D = 1.5
h0 = D*dx
# Artificial Heat constants
g1 = 0.2
g2 = 1.0
def get_particles(with_boundary=False, **kwargs):
adke, left, right = data.get_shock_tube_data(nl=nl, nr=nr, xl=xl, xr=xr,
pl=pl, pr=pr,
rhol=rhol, rhor=rhor,
ul=ul, ur=ur,
g1=g1, g2=g2, h0=h0,
gamma=gamma)
if with_boundary:
return [adke, left, right]
else:
return [adke,]
app = solver.Application()
s = solver.ADKEShockTubeSolver(dim=1,
integrator_type=solver.RK2Integrator,
h0=h0, eps=eps, k=k, g1=g1, g2=g2,
alpha=alpha, beta=beta,gamma=gamma,
kernel=kernel, hks=hks,)
s.set_final_time(tf)
s.set_time_step(dt)
app.setup(
solver=s,
min_cell_size=4*h0,
variable_h=True,
create_particles=get_particles,
locator_type=Locator.SPHNeighborLocator)
output_dir = app.options.output_dir
numpy.savez(output_dir + "/parameters.npz", eps=eps, k=k, h0=h0,
g1=g1, g2=g2, alpha=alpha, beta=beta, hks=hks)
app.run()
| [
[
8,
0,
0.0122,
0.0122,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.0366,
0.0122,
0,
0.66,
0.0244,
954,
0,
1,
0,
0,
954,
0,
0
],
[
1,
0,
0.061,
0.0122,
0,
0.66,... | [
"\"\"\" Strong blaswave problem proposed by Sigalotti. Mach number = 771 \"\"\"",
"import numpy",
"import pysph.base.api as base",
"import pysph.solver.api as solver",
"import get_shock_tube_data as data",
"Locator = base.NeighborLocatorType",
"kernel = base.CubicSplineKernel",
"hks=False",
"xl = -1... |
""" An example solving the Ellptical drop test case """
import pysph.base.api as base
import pysph.solver.api as solver
import warnings
dt = 1e-4
tf = 0.0076
app = solver.Application()
# set the integrator type
integrator_type = solver.RK2Integrator
s = solver.FluidSolver(dim=2, integrator_type=integrator_type)
s.set_time_step(dt)
s.set_final_time(tf)
# app.setup(
# solver=s,
# variable_h=False,
# create_particles=solver.fluid_solver.get_circular_patch, name='fluid', type=0,
# locator_type=base.NeighborLocatorType.SPHNeighborLocator,
# cl_locator_type=base.OpenCLNeighborLocatorType.LinkedListSPHNeighborLocator,
# domain_manager_type=base.DomainManagerType.LinkedListManager)
app.setup(
solver=s,
variable_h=False,
create_particles=solver.fluid_solver.get_circular_patch, name='fluid', type=0,
locator_type=base.NeighborLocatorType.SPHNeighborLocator,
cl_locator_type=base.OpenCLNeighborLocatorType.RadixSortNeighborLocator,
domain_manager_type=base.DomainManagerType.RadixSortManager)
if app.options.with_cl:
msg = """\n\n
You have chosen to run the example with OpenCL support. The only
integrator with OpenCL support is the forward Euler
integrator. This integrator will be used instead of the default
RK2 integrator for this example.\n\n
"""
warnings.warn(msg)
integrator_type = solver.EulerIntegrator
# Print the output at every time step
s.set_print_freq(1)
app.run()
| [
[
8,
0,
0.0189,
0.0189,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.0566,
0.0189,
0,
0.66,
0.0714,
212,
0,
1,
0,
0,
212,
0,
0
],
[
1,
0,
0.0755,
0.0189,
0,
0.66... | [
"\"\"\" An example solving the Ellptical drop test case \"\"\"",
"import pysph.base.api as base",
"import pysph.solver.api as solver",
"import warnings",
"dt = 1e-4",
"tf = 0.0076",
"app = solver.Application()",
"integrator_type = solver.RK2Integrator",
"s = solver.FluidSolver(dim=2, integrator_type... |
"""
PySPH
=====
A general purpose Smoothed Particle Hydrodynamics framework.
This package provides a general purpose framework for SPH simulations
in Python. The framework emphasizes flexibility and efficiency while
allowing most of the user code to be written in pure Python. See here:
http://pysph.googlecode.com
for more information.
"""
from setuptools import find_packages, setup
HAS_CYTHON=True
try:
from Cython.Distutils import build_ext
from Cython.Build import cythonize
cmdclass = {'build_ext': build_ext}
except ImportError:
HAS_CYTHON=False
cmdclass = {}
from numpy.distutils.extension import Extension
import numpy
import sys
import os
import platform
import multiprocessing
# build settings shared by all extension modules
ncpu = multiprocessing.cpu_count()
inc_dirs = [numpy.get_include()]
extra_compile_args = []
extra_link_args = []

mpi_inc_dirs = []
mpi_compile_args = []
mpi_link_args = []

USE_CPP = True
HAS_MPI4PY = True
try:
    import mpi4py
    # assume a working mpi environment
    import commands
    if USE_CPP:
        mpic = 'mpicxx'
    else:
        mpic = 'mpicc'
    # query the MPI compiler wrapper for its link and compile flags
    mpi_link_args.append(commands.getoutput(mpic + ' --showme:link'))
    mpi_compile_args.append(commands.getoutput(mpic +' --showme:compile'))
    mpi_inc_dirs.append(mpi4py.get_include())
except ImportError:
    HAS_MPI4PY = False

# embed call signatures in the generated sources for introspection
cy_directives = {'embedsignature':True,
                 }

# extension of the pre-generated sources used when Cython is absent
C_EXTN = 'c'
if USE_CPP:
    C_EXTN = 'cpp'

# cython extension modules (subpackage directory:cython file)
extensions = {'base': ['carray.pyx',
                       'fast_utils.pyx',
                       'point.pyx',
                       'particle_array.pyx',
                       'cell.pyx',
                       'kernels.pyx',
                       'nnps.pyx',
                       'plane.pyx',
                       'polygon_array.pyx',
                       'geometry.pyx',
                       'nnps_util.pyx',
                       ],
              'sph': ['sph_func.pyx',
                      'sph_calc.pyx',
                      'kernel_correction.pyx',
                      ],
              'sph/funcs': ['basic_funcs.pyx',
                            'position_funcs.pyx',
                            'boundary_funcs.pyx',
                            'external_force.pyx',
                            'density_funcs.pyx',
                            'energy_funcs.pyx',
                            'viscosity_funcs.pyx',
                            'pressure_funcs.pyx',
                            'xsph_funcs.pyx',
                            'eos_funcs.pyx',
                            'adke_funcs.pyx',
                            'arithmetic_funcs.pyx',
                            'stress_funcs.pyx',
                            'linalg.pyx',
                            'gsph_funcs.pyx',
                            'euler1d.pyx',
                            'test_funcs.pyx',
                            'common.pyx',
                            ],
              'solver': ['particle_generator.pyx',
                         ],
              }

# extensions that are built only when mpi4py is available
parallel_extensions = {'parallel': ['parallel_controller.pyx',
                                    'parallel_cell.pyx',
                                    'parallel_manager.pyx',
                                    ],
                       }
def gen_extensions(ext):
    """Given a dictionary with key package name and value a list of Cython
    files, return a list of Extension instances.

    When Cython is not available the pre-generated C/C++ sources
    (extension C_EXTN) are used instead of the .pyx files.
    """
    modules = []
    for subpkg, files in ext.iteritems():
        for filename in files:
            base = os.path.splitext(filename)[0]
            module = 'pysph.%s.%s'%(subpkg, base)
            module = module.replace("/", ".")
            # use a dedicated name for the source-file extension; the
            # original assigned to `ext` here, clobbering the dict
            # parameter of the same name.
            src_ext = 'pyx'
            if not HAS_CYTHON:
                src_ext = C_EXTN
            src = 'source/pysph/%s/%s.%s'%(subpkg, base, src_ext)
            modules.append(Extension(module, [src]))
    return modules
ext_modules = gen_extensions(extensions)
par_modules = gen_extensions(parallel_extensions)

# the parallel modules are only built when mpi4py was found
if HAS_MPI4PY:
    ext_modules.extend(par_modules)

# common build flags for every extension
for extn in ext_modules:
    extn.include_dirs = inc_dirs
    extn.extra_compile_args = extra_compile_args
    extn.extra_link_args = extra_link_args
    extn.pyrex_directives = cy_directives
    if USE_CPP:
        extn.language = 'c++'

# the MPI flags apply to the parallel modules only
for extn in par_modules:
    extn.include_dirs.extend(mpi_inc_dirs)
    extn.extra_compile_args.extend(mpi_compile_args)
    extn.extra_link_args.extend(mpi_link_args)

# run the code generator before an actual build/install
if 'build_ext' in sys.argv or 'develop' in sys.argv or 'install' in sys.argv:
    d = {'__file__':'source/pysph/base/generator.py'}
    execfile('source/pysph/base/generator.py', d)
    d['main'](None)

if HAS_CYTHON and platform.system() != "Windows":
    ext_modules = cythonize(ext_modules,nthreads=ncpu,include_path=inc_dirs)

setup(name='PySPH',
      version = '0.9beta',
      author = 'PySPH Developers',
      author_email = 'pysph-dev@googlegroups.com',
      description = "A general purpose Smoothed Particle Hydrodynamics framework",
      long_description = __doc__,
      url = 'http://pysph.googlecode.com',
      license = "BSD",
      keywords = "SPH simulation computational fluid dynamics",
      test_suite = "nose.collector",
      packages = find_packages('source'),
      package_dir = {'': 'source'},
      ext_modules = ext_modules,
      include_package_data = True,
      cmdclass=cmdclass,
      #install_requires=['mpi4py>=1.2', 'numpy>=1.0.3', 'Cython>=0.14'],
      #setup_requires=['Cython>=0.14', 'setuptools>=0.6c1'],
      #extras_require={'3D': 'Mayavi>=3.0'},
      zip_safe = False,
      entry_points = """
          [console_scripts]
          pysph_viewer = pysph.tools.mayavi_viewer:main
          """,
      platforms=['Linux', 'Mac OS-X', 'Unix', 'Windows'],
      classifiers = [c.strip() for c in """\
        Development Status :: 4 - Beta
        Environment :: Console
        Intended Audience :: Developers
        Intended Audience :: Science/Research
        License :: OSI Approved :: BSD License
        Natural Language :: English
        Operating System :: MacOS :: MacOS X
        Operating System :: Microsoft :: Windows
        Operating System :: POSIX
        Operating System :: Unix
        Programming Language :: Python
        Topic :: Scientific/Engineering
        Topic :: Scientific/Engineering :: Physics
        Topic :: Software Development :: Libraries
        """.splitlines() if len(c.split()) > 0],
      )
| [
[
8,
0,
0.0379,
0.0707,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.0808,
0.0051,
0,
0.66,
0.0312,
182,
0,
2,
0,
0,
182,
0,
0
],
[
14,
0,
0.0859,
0.0051,
0,
0.6... | [
"\"\"\"\nPySPH\n=====\n\nA general purpose Smoothed Particle Hydrodynamics framework.\n\nThis package provides a general purpose framework for SPH simulations\nin Python. The framework emphasizes flexibility and efficiency while",
"from setuptools import find_packages, setup",
"HAS_CYTHON=True",
"try:\n f... |
""" Helper functions to generate commonly used geometries.
PySPH used an axis convention as follows:
Y
|
|
|
|
|
| /Z
| /
| /
| /
| /
| /
|/_________________X
"""
import numpy
def create_2D_tank(x1,y1,x2,y2,dx):
    """ Generate an open rectangular tank.

    Parameters:
    -----------
    x1,y1,x2,y2 : Coordinates defining the rectangle in 2D
    dx : The spacing to use

    The points are returned wall-by-wall: left wall, then the floor
    (excluding the two corner points already on the walls), then the
    right wall.
    """
    # left wall at x = x1
    wall_y = numpy.arange(y1, y2+dx/2, dx)
    left_x = numpy.ones_like(wall_y) * x1

    # right wall at x = x2
    right_wall_y = numpy.arange(y1, y2+dx/2, dx)
    right_x = numpy.ones_like(right_wall_y) * x2

    # floor at y = y1, corners excluded
    floor_x = numpy.arange(x1+dx, x2-dx+dx/2, dx)
    floor_y = numpy.ones_like(floor_x) * y1

    x = numpy.concatenate( (left_x, floor_x, right_x) )
    y = numpy.concatenate( (wall_y, floor_y, right_wall_y) )

    return x, y
def create_3D_tank(x1, y1, z1, x2, y2, z2, dx):
    """ Generate an open rectangular tank.

    Parameters:
    -----------
    x1,y1,z1,x2,y2,z2 : Coordinates defining the box
    dx : The spacing to use

    Five faces are generated (the top at z = z2 is left open) and the
    shared edge points are de-duplicated through a set.
    """
    points = []

    # base X-Y plane at z = z1
    gx, gy = numpy.mgrid[x1:x2+dx/2:dx, y1:y2+dx/2:dx]
    gx = gx.ravel(); gy = gy.ravel()
    points.extend( zip(gx, gy, numpy.ones_like(gx)*z1) )

    # front X-Z plane at y = y1
    gx, gz = numpy.mgrid[x1:x2+dx/2:dx, z1:z2+dx/2:dx]
    gx = gx.ravel(); gz = gz.ravel()
    points.extend( zip(gx, numpy.ones_like(gx)*y1, gz) )

    # first Y-Z plane at x = x1
    gy2, gz2 = numpy.mgrid[y1:y2+dx/2:dx, z1:z2+dx/2:dx]
    gy2 = gy2.ravel(); gz2 = gz2.ravel()
    points.extend( zip(numpy.ones_like(gy2)*x1, gy2, gz2) )

    # back X-Z plane at y = y2 (grid reused from the front face)
    points.extend( zip(gx, numpy.ones_like(gx)*y2, gz) )

    # second Y-Z plane at x = x2 (grid reused from the first side)
    points.extend( zip(numpy.ones_like(gy2)*x2, gy2, gz2) )

    # drop duplicates along the shared edges
    points = set(points)

    x = numpy.array( [pt[0] for pt in points] )
    y = numpy.array( [pt[1] for pt in points] )
    z = numpy.array( [pt[2] for pt in points] )

    return x, y, z
def create_2D_filled_region(x1, y1, x2, y2, dx):
    """ Return a uniform lattice of points filling [x1,x2] x [y1,y2]. """
    xx, yy = numpy.mgrid[x1:x2+dx/2:dx, y1:y2+dx/2:dx]
    return xx.ravel(), yy.ravel()
def create_3D_filled_region(x1, y1, z1, x2, y2, z2, dx):
    """ Return a uniform lattice of points filling the given box. """
    grid = numpy.mgrid[x1:x2+dx/2:dx, y1:y2+dx/2:dx, z1:z2+dx/2:dx]
    return grid[0].ravel(), grid[1].ravel(), grid[2].ravel()
| [
[
8,
0,
0.0786,
0.15,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.1643,
0.0071,
0,
0.66,
0.2,
954,
0,
1,
0,
0,
954,
0,
0
],
[
2,
0,
0.3143,
0.2786,
0,
0.66,
... | [
"\"\"\" Helper functions to generate commonly used geometries.\n\nPySPH used an axis convention as follows:\n\nY\n|\n|\n|",
"import numpy",
"def create_2D_tank(x1,y1,x2,y2,dx):\n \"\"\" Generate an open rectangular tank.\n\n Parameters:\n -----------\n\n x1,y1,x2,y2 : Coordinates defining the rectan... |
""" Module to implement various space filling curves for load balancing """
import numpy
from pysph.base.point import IntPoint
try:
from hilbert import Hilbert_to_int
have_hilbert = True
except ImportError:
# TODO: implement Hilbert's SFC
have_hilbert = False
def morton_sfc(cell_id, maxlen=20, dim=3):
    """Returns key of indices using Morton's space filling curve.

    Each (possibly negative) cell index is offset by 2**maxlen so it
    fits in maxlen+1 bits, and the bits of the coordinates are then
    interleaved, most significant first, to form the key.
    """
    if isinstance(cell_id, IntPoint):
        cell_id = (cell_id.x,cell_id.y,cell_id.z)
    cell_id = cell_id[:dim]

    binary_repr = numpy.binary_repr
    s = 2**maxlen

    # Pad every coordinate to exactly maxlen+1 bits.  Non-negative
    # indices already have maxlen+1 bits after the offset, but negative
    # ones come out shorter; the previous code padded them to only
    # maxlen bits, which made the interleaving loop below raise an
    # IndexError at bit position maxlen.
    bins = [binary_repr(i+s).zfill(maxlen+1) for i in cell_id]

    # interleave the coordinate bits, most significant first
    key = 0
    for i in range(maxlen+1):
        for bin in bins:
            key = 2*key + (bin[i] == '1')
    return key
def hilbert_sfc(cell_id, maxlen=20, dim=3):
    """Returns key of indices using Hilbert space filling curve """
    if isinstance(cell_id, IntPoint):
        cell_id = (cell_id.x,cell_id.y,cell_id.z)

    # offset by 2**maxlen so negative indices map to non-negative ints
    offset = 2**maxlen
    return Hilbert_to_int([int(c + offset) for c in cell_id[:dim]])
# registry of the available space filling curves; the Hilbert curve is
# only offered when the optional `hilbert` module could be imported
sfc_func_dict = {'morton':morton_sfc}
if have_hilbert:
    sfc_func_dict['hilbert'] = hilbert_sfc
| [
[
8,
0,
0.0217,
0.0217,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.0652,
0.0217,
0,
0.66,
0.1429,
954,
0,
1,
0,
0,
954,
0,
0
],
[
1,
0,
0.087,
0.0217,
0,
0.66,... | [
"\"\"\" Module to implement various space filling curves for load balancing \"\"\"",
"import numpy",
"from pysph.base.point import IntPoint",
"try:\n from hilbert import Hilbert_to_int\n have_hilbert = True\nexcept ImportError:\n # TODO: implement Hilbert's SFC\n have_hilbert = False",
" from... |
from parallel_manager import ParallelManager
from parallel_controller import ParallelController
from pysph.base.particle_array import get_local_real_tag, get_dummy_tag
from pysph.base.fast_utils import arange_long
# logger imports
import logging
logger = logging.getLogger()
# Constants
Dummy = get_dummy_tag()
LocalReal = get_local_real_tag()
class SimpleParallelManager(ParallelManager):
    """This is a very simple parallel manager. It simply broadcasts all the
    particles. Each machine has exactly the same particles for all time.
    There is no support currently for dynamically changing the particles but
    that should be trivial to add.
    """
    def __init__(self, parallel_controller=None):
        # a default controller is created when none is supplied
        if parallel_controller is None:
            parallel_controller = ParallelController()
        self.parallel_controller = parallel_controller
        # cache the communicator, communicator size and our rank
        self.comm = parallel_controller.comm
        self.size = self.parallel_controller.num_procs
        self.rank = self.parallel_controller.rank

    def initialize(self, particles):
        """Initialize the parallel manager with the `Particles`.
        """
        self.particles = particles

    def update(self):
        """Exchange particles so that every processor holds a full copy
        of all arrays; copies received from other ranks are tagged Dummy.
        """
        logger.debug("SimpleParallelManager.update()")
        comm = self.comm
        rank = self.rank
        size = self.size
        local_data = self.particles.arrays

        # Remove remotes from the local.
        for arr in local_data:
            # real particles occupy [0, num_real); anything beyond that
            # is a remote copy left over from a previous exchange
            remove = arange_long(arr.num_real_particles, arr.get_number_of_particles())
            arr.remove_particles(remove)
            # everybody sets the pid for their local arrays
            arr.set_pid(rank)

        comm.Barrier()

        # Collect all the local arrays and then broadcast them.
        data = comm.gather(local_data, root=0)
        data = comm.bcast(data, root=0)

        # Now set the remote data's tags to Dummy and add the arrays to
        # the local.
        for i in range(size):
            if i != rank:
                for j, arr in enumerate(data[i]):
                    tag = arr.get_carray('tag')
                    tag.get_npy_array()[:] = Dummy
                    #local = arr.get_carray('local')
                    #local.get_npy_array()[:] = 0
                    local_data[j].append_parray(arr)
        return

    def update_remote_particle_properties(self, props):
        """Update only the remote particle properties.
        This is typically called when particles don't move but only some of
        their properties have changed.

        `props` is accepted for interface compatibility; this simple
        manager re-sends everything regardless.
        """
        logger.debug("SimpleParallelManager.update_remote_particle_properties()")
        # Just call update.
        self.update()
| [
[
1,
0,
0.0118,
0.0118,
0,
0.66,
0,
501,
0,
1,
0,
0,
501,
0,
0
],
[
1,
0,
0.0235,
0.0118,
0,
0.66,
0.125,
346,
0,
1,
0,
0,
346,
0,
0
],
[
1,
0,
0.0471,
0.0118,
0,
0... | [
"from parallel_manager import ParallelManager",
"from parallel_controller import ParallelController",
"from pysph.base.particle_array import get_local_real_tag, get_dummy_tag",
"from pysph.base.fast_utils import arange_long",
"import logging",
"logger = logging.getLogger()",
"Dummy = get_dummy_tag()",
... |
"""
Module to implement parallel decomposition of particles to assign to
different processes during parallel simulations. The method used is an
extension of k-means clustering algorithm
"""
# logging imports
import logging
logger = logging.getLogger()
# standard imports
import numpy
# local imports
from pysph.base.cell import py_construct_immediate_neighbor_list
from load_balancer import LoadBalancer
from load_balancer_sfc import LoadBalancerSFC
class Cluster():
"""Class representing a cluster in k-means clustering"""
def __init__(self, cells, cell_np, np_req, **kwargs):
"""constructor
kwargs can be used to finetune the algorithm:
t = ratio of old component of center used in the center calculation
tr = `t` when the number of particles over/undershoot (reversal)
u = ratio of nearest cell center in the new center from the remaining
(1-t) (other component is the centroid) of cells
e = reciprocal of the exponent of
(required particles)/(actual particles) used to
resize the cluster
er = `e` on reversal (see `tr`)
r = clipping of resize factor between (1/r and r)
"""
self.cells = cells
self.cell_np = cell_np
self.dnp = 0
self.np = 0
self.dsize = 0.0
self.size = 1.0
self.np_req = np_req
# ratio of old component
self.tr = kwargs.get('tr',0.8)
# ratio of nearest cell in the new component (other is the centroid)
self.u = kwargs.get('u',0.4)
# exponent for resizing
self.e = kwargs.get('e',3.0)
self.er = kwargs.get('er',6.0)
self.r = kwargs.get('r',2.0)
# there's no previous center hence it shouldn't come into calculation
self.t = 0.0
self.x = self.y = self.z = 0.0
np = 0
for cell in self.cells:
n = self.cell_np[cell]
np += n
self.x += (cell.x)#*n
self.y += (cell.y)#*n
self.z += (cell.z)#*n
self.np = np
np = float(len(self.cells))
self.x, self.y, self.z = self.x/np,self.y/np,self.z/np
self.center = numpy.array([self.x, self.y, self.z])
self.dcenter = self.center*0
# so that initial setting is not way off
self.move()
# set the value of t
self.t = kwargs.get('t',0.2)
def calc(self):
"""calculate the number of particles and the change in the number of
particles (after a reallocation of cells)"""
np = 0
for cell in self.cells:
n = self.cell_np[cell]
np += n
self.dnp = np - self.np
self.np = np
def move(self):
"""move the center depending on the centroid of cells (A),
the nearest cell to the centroid (B) and the old center(C)
formula: new center = (1-t)(1-u)A + (1-t)uB + tC
t = tr on reversal (overshoot/undershoot of particles)"""
x = y = z = 0.0
for cell in self.cells:
x += (cell.x)#*n
y += (cell.y)#*n
z += (cell.z)#*n
np = float(len(self.cells))
med = numpy.array([x/np,y/np,z/np])
dists = []
for cell in self.cells:
d = (cell.x-self.x)**2+(cell.y-self.y)**2+(cell.z-self.z)**2
d = numpy.sqrt(d)
dists.append(d)
#md = (cell.x-med[0])**2+(cell.y-med[1])**2+(cell.z-med[2])**2
#dists[-1] = (dists[-1]+md)/2
cell = self.cells[numpy.argmin(dists)]
cc = numpy.array([cell.x, cell.y, cell.z])
t = self.t
if abs(self.dnp) * ( self.np-self.np_req) > 0:
t = self.tr
self.dcenter = (1-t)*(med-self.center + self.u*(cc-med))
self.x,self.y,self.z = self.center = self.center + self.dcenter
def resize(self):
"""resize the cluster depending on the number of particles and the
required number of particles
formula: new size = (old_size)*(np_req/np)**(1/e),
clipped between r and 1/r"""
e = self.e
if abs(self.dnp) * ( self.np-self.np_req) > 0:
e = self.er
self.dsize = numpy.clip((self.np_req/self.np)**(1./e), 1/self.r, self.r)
self.size *= self.dsize
class ParDecompose:
"""Partition of cells for parallel solvers"""
def __init__(self, cell_proc, proc_cell_np, init=True, **kwargs):
"""constructor
kwargs can be used to finetune the algorithm:
c = (0.3) the ratio of euler distance contribution in calculating the
distance of particle from cluster center
(the other component is scaled distance based on cluster size)
t = (0.2) ratio of old component of center in the center calculation
tr = (0.8) `t` when the number of particles over/undershoot (reversal)
u = (0.4) ratio of nearest cell center in the new center from the
remaining (1-t) (other component is the centroid) of cells
e = (3) reciprocal of the exponent of
(required particles)/(actual particles) used to
resize the cluster
er = (6) `e` on reversal (see `tr`)
r = (2) clipping of resize factor between (1/r and r)
"""
self.block_proc = cell_proc
self.proc_block_np = proc_cell_np
self.num_procs = len(proc_cell_np)
self.c = kwargs.get('c', 0.3)
if init:
self.gen_clusters(**kwargs)
def clusters_allocate_cells(self):
"""allocate the cells in the cell manager to clusters based on their
"weighted distance" from the center of the cluster"""
for cluster in self.clusters:
cluster.cells[:] = []
for cell in self.block_proc:
wdists = []
for cluster in self.clusters:
s = cluster.size
d = ( (cell.x-cluster.x)**2 + (cell.y-cluster.y)**2 +
(cell.z-cluster.z)**2 )
d = numpy.sqrt(d)
c = self.c
# TODO: choose a better distance function below
r = d*(c+(1-c)*numpy.exp(-s/d))
r = numpy.clip(r,0,r)
wdists.append(r)
self.clusters[numpy.argmin(wdists)].cells.append(cell)
def get_distribution(self):
"""return the list of cells and the number of particles in each
cluster to be used for distribution to processes"""
self.calc()
proc_blocks = self.proc_blocks
proc_num_particles = self.particle_loads
cell_proc = LoadBalancer.get_block_proc(proc_blocks=proc_blocks)
return cell_proc, proc_num_particles
def cluster_bal_iter(self):
"""perform a single iteration of balancing the clusters
**algorithm**
# move the cluster center based on their cells
# allocate cells to clusters based on new centers
# resize the clusters based on the number of particles
# allocate cells to clusters based on new sizes
"""
# moving
for j,cluster in enumerate(self.clusters):
cluster.move()
self.clusters_allocate_cells()
for j,cluster in enumerate(self.clusters):
cluster.calc()
#print j, '\t', cluster.center, '\t', cluster.np, '\t', cluster.size
# resizing
for j,cluster in enumerate(self.clusters):
cluster.resize()
self.clusters_allocate_cells()
for j,cluster in enumerate(self.clusters):
cluster.calc()
#print j, '\t', cluster.center, '\t', cluster.np, '\t', cluster.size
self.calc()
def calc(self):
"""calculates the cells in each process, the cell and particle loads
and the imbalance in the distribution"""
self.proc_blocks = [cluster.cells for cluster in self.clusters]
self.cell_loads = [sum([len(cell) for cell in self.proc_blocks])]
self.particle_loads = [cluster.np for cluster in self.clusters]
self.imbalance = LoadBalancer.get_load_imbalance(self.particle_loads)
def gen_clusters(self, proc_cells=None, proc_num_particles=None, **kwargs):
"""generate the clusters to operate on. This is automatically called
by the constructor if its `init` argument is True (default)"""
cell_np = {}
for tmp_cells_np in self.proc_block_np:
cell_np.update(tmp_cells_np)
self.cell_np = cell_np
if proc_cells is None:
proc_cells, proc_num_particles = LoadBalancer.distribute_particles_geometric(
self.cell_np, self.num_procs)
self.np_req = numpy.average(proc_num_particles)
self.clusters = [Cluster(cells, cell_np, self.np_req, **kwargs)
for cells in proc_cells]
self.calc()
def distribute_particles(cm, num_procs, max_iter=200, n=5, **kwargs):
""" distribute particles according to the modified k-means clustering
algorithm implemented by the `ParDecompose` class
The algorithm runs maximum `max_iter` iterations.
The solution is assumed converged if the particle distribution is same
in `n+k` steps out of `n+2k` latest steps
See :class:`ParDecompose` for the fine-tuning parameters kwargs"""
pd = ParDecompose(cm, num_procs, **kwargs)
pd.calc()
proc_num_particles = pd.particle_loads
conv = 0
for t in range(max_iter):
pd.cluster_bal_iter()
pd.calc()
#print t
proc_num_particlesold = proc_num_particles
proc_num_particles = pd.particle_loads
imbal = pd.imbalance
logger.debug('imbalance %g' %imbal)
if proc_num_particlesold == proc_num_particles:
conv += 1
logger.debug('converged in %d iterations' %t)
if conv > n:
break
else:
conv -= 1
if conv < 0: conv = 0
return pd.get_distribution()
###############################################################################
# `LoadBalancerMKMeans` class.
###############################################################################
class LoadBalancerMKMeans(LoadBalancerSFC):
def __init__(self, **args):
LoadBalancerSFC.__init__(self, **args)
self.method = 'serial_mkmeans'
self.args = args
def load_balance_func_serial_mkmeans(self, **args):
self.load_balance_func_serial('mkmeans', **args)
def load_redistr_mkmeans(self, cell_proc=None, proc_cell_np=None, max_iter=None, n=3, **args):
""" distribute particles according to the modified k-means clustering
algorithm implemented by the `ParDecompose` class
The algorithm runs maximum `max_iter` iterations.
The solution is assumed converged if the particle distribution is same
in `n+k` steps out of `n+2k` latest steps
See :class:`ParDecompose` for the fine-tuning parameters kwargs"""
args2 = {}
args2.update(self.args)
args2.update(args)
if max_iter is None:
max_iter = self.lb_max_iterations
#print args
pd = ParDecompose(cell_proc, proc_cell_np, **args)
pd.calc()
proc_num_particles = pd.particle_loads
conv = 0
for t in range(max_iter):
pd.cluster_bal_iter()
pd.calc()
#print t
proc_num_particlesold = proc_num_particles
proc_num_particles = pd.particle_loads
imbal = pd.imbalance
logger.debug('imbalance %g' %imbal)
if proc_num_particlesold == proc_num_particles:
conv += 1
logger.debug('converged in %d iterations' %t)
if conv > n:
logger.debug('mkm converged in %d iterations' %t)
break
else:
conv -= 1
if conv < 0: conv = 0
#self.balancing_done = True
return pd.get_distribution()
| [
[
8,
0,
0.0096,
0.0159,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.0255,
0.0032,
0,
0.66,
0.1,
715,
0,
1,
0,
0,
715,
0,
0
],
[
14,
0,
0.0287,
0.0032,
0,
0.66,
... | [
"\"\"\"\nModule to implement parallel decomposition of particles to assign to\ndifferent processes during parallel simulations. The method used is an\nextension of k-means clustering algorithm\n\"\"\"",
"import logging",
"logger = logging.getLogger()",
"import numpy",
"from pysph.base.cell import py_constru... |
"""
Contains class to perform load balancing using space filling curves.
"""
# logging imports
import logging
logger = logging.getLogger()
# standard imports
import numpy
# local imports
from pysph.base.particle_array import ParticleArray
from pysph.base.cell import py_construct_immediate_neighbor_list
from load_balancer import LoadBalancer
import space_filling_curves
###############################################################################
# `LoadBalancerSFC` class.
###############################################################################
class LoadBalancerSFC(LoadBalancer):
def __init__(self, sfc_func_name='morton', sfc_func_dict=None, **args):
LoadBalancer.__init__(self, **args)
self.method = 'serial_sfc'
if sfc_func_dict is None:
sfc_func_dict = space_filling_curves.sfc_func_dict
self.sfc_func_dict = sfc_func_dict
self.sfc_func = sfc_func_name
def load_balance_func_serial_sfc(self, sfc_func_name=None, **args):
""" serial load balance function which uses SFCs
calls the :class:Loadbalancer :meth:load_balance_func_serial
setting the appropriate sfc function
"""
if sfc_func_name is None:
sfc_func_name = self.sfc_func
sfc_func = self.sfc_func_dict[sfc_func_name]
self.load_balance_func_serial('sfc', sfc_func=sfc_func, **args)
def load_redistr_sfc(self, cell_proc, proc_cell_np, sfc_func=None, **args):
""" function to redistribute the cells amongst processes using SFCs
This is called by :class:Loadbalancer :meth:load_balance_func_serial
"""
if isinstance(sfc_func, str):
sfc_func = self.sfc_func_dict[sfc_func]
if sfc_func is None:
sfc_func = self.sfc_func_dict[self.sfc_func]
num_procs = len(proc_cell_np)
num_cells = len(cell_proc)
cell_arr = numpy.empty((num_cells, 3))
for i,cell_id in enumerate(cell_proc):
cell_arr[i,0] = cell_id.x
cell_arr[i,1] = cell_id.y
cell_arr[i,2] = cell_id.z
dim = 3
if min(cell_arr[:,2])==max(cell_arr[:,2]):
dim = 2
if min(cell_arr[:,1])==max(cell_arr[:,1]):
dim = 1
np_per_proc = sum(self.particles_per_proc)/float(self.num_procs)
cell_ids = cell_proc.keys()
cell_ids.sort(key=lambda x: sfc_func(x, dim=dim))
ret_cells = [[] for i in range(num_procs)]
proc_num_particles = [0]*num_procs
np = 0
proc = 0
for cell_id in cell_ids:
np += self.proc_block_np[cell_proc[cell_id]][cell_id]
#print proc, cell_id, np
ret_cells[proc].append(cell_id)
if np > np_per_proc:
proc_num_particles[proc] = np
np -= np_per_proc
proc += 1
self.particles_per_proc = [0]*self.num_procs
cell_np = {}
for cnp in self.proc_block_np:
cell_np.update(cnp)
for proc,cells in enumerate(ret_cells):
for cid in cells:
cell_proc[cid] = proc
self.particles_per_proc[proc] += cell_np[cid]
self.balancing_done = True
return cell_proc, self.particles_per_proc
###############################################################################
| [
[
8,
0,
0.0213,
0.0319,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.0638,
0.0106,
0,
0.66,
0.125,
715,
0,
1,
0,
0,
715,
0,
0
],
[
14,
0,
0.0745,
0.0106,
0,
0.66... | [
"\"\"\"\nContains class to perform load balancing using space filling curves.\n\"\"\"",
"import logging",
"logger = logging.getLogger()",
"import numpy",
"from pysph.base.particle_array import ParticleArray",
"from pysph.base.cell import py_construct_immediate_neighbor_list",
"from load_balancer import ... |
""" Contains class to perform load balancing.
"""
#FIXME: usage documentation
# logging imports
import logging
logger = logging.getLogger()
# standard imports
import numpy
# local imports
from pysph.base.particle_array import ParticleArray, get_particle_array
from pysph.base.cell import CellManager, py_construct_immediate_neighbor_list
TAG_LB_PARTICLE_REQUEST = 101
TAG_LB_PARTICLE_REPLY = 102
###############################################################################
# `LoadBalancer` class.
###############################################################################
class LoadBalancer:
""" Class to perform simple load balancing. """
def __init__(self, parallel_cell_manager=None, *args, **kwargs):
self.setup_done = False
self.cell_manager = parallel_cell_manager
self.skip_iteration = 10
self.pid = 0
self.num_procs = 1
self.particles_per_proc = []
self.ideal_load = 0.
self.threshold_ratio = 25.
self.threshold_margin = 0.
self.lb_max_iterations = 10
self.upper_threshold = 0.
self.lower_threshold = 0.
self.load_difference = []
self.prev_particle_count = []
self.method = None
#self.adaptive = kwargs.get('adaptive', True)
def setup(self):
""" Sets up some internal data. """
if self.setup_done == True:
return
self.proc_map = self.cell_manager.proc_map
self.parallel_controller = self.cell_manager.parallel_controller
self.pid = self.parallel_controller.rank
self.num_procs = self.parallel_controller.num_procs
self.comm = self.parallel_controller.comm
self.setup_done = True
def load_balance(self, method=None, **args):
""" Calls the load_balance_func """
self.setup()
if method is None:
method = self.method
if method is None or method == '':
self.load_balance_func(**args)
else:
func = getattr(self, 'load_balance_func_'+method)
func(**args)
def load_balance_func(self, adaptive=False):
return self.load_balance_func_normal(adaptive)
def load_balance_func_normal(self, adaptive=False):
""" Perform the load balancing.
**Algorithm**
- while load not balanced or lb iterations not exceeded.
- Compute some statistics
- Find the number of real particles in all processors.
- Find the total number of particles.
- Find the mean number of particles with each processor.
- If number of particles with each processor is within a
particular threshold from the mean, load is balanced, exit.
- Sort processor ids in increasing order of number of particles
with them. In case of multiple processors having the same
number of particles, arrange them in ascending order of pid.
- If there are some processors with 0 particles, communication
among all processors.
- If no such processors are there, each processor shall
communicate with adjacent neighbors.
- *********** PASS1 ************
- mypid <- self.rank
- num_procs <- len(procs_to_communicate)
- i = num_procs-1
- pid <- procs_to_communicate[i]
- while pid != mypid:
- send request to pid for particles.
- recv particles of one or more blocks from pid
- add particles to particle array.
- i -= 1
- *********** PASS2 ************
- i = 0
- pid <- procs_to_communicate[i]
- while pid != mypid:
- recv request from pid for particles.
- find a suitable set of blocks to offload.
- send particles of these blocks to pid.
- remove sent particles from local blocks.
- i += 1
- BARRIER.
- bin particles top down.
- update processor map.
- update neighbor information.
- lb_iterations += 1
"""
self.adaptive = adaptive
balancing_done = False
current_balance_iteration = 0
num_procs = self.num_procs
self.particles_per_proc = [0]*num_procs
if len(self.prev_particle_count) == 0:
self.prev_particle_count = [0]*num_procs
self.ideal_load = 0.
self.load_difference = [0]*num_procs
while balancing_done == False:
block_np = {}
for bid, cells in self.cell_manager.proc_map.cell_map.iteritems():
block_np[bid] = 0
for cid in cells:
block_np[bid] += self.cell_manager.cells_dict[cid].get_number_of_particles()
self.proc_block_np = [{} for i in range(num_procs)]
self.proc_block_np[self.pid].update(block_np)
logger.info('Load Balance iteration %d -------------------'%(
current_balance_iteration))
if current_balance_iteration >= self.lb_max_iterations:
balancing_done = True
logger.info('MAX LB ITERATIONS EXCEEDED')
continue
# get the number of particles with each process.
self.particles_per_proc = self.collect_num_particles()
self.calc_load_thresholds(self.particles_per_proc)
min_diff = min(self.load_difference)
max_diff = max(self.load_difference)
if (abs(min_diff) < self.threshold_margin and max_diff <
self.threshold_margin):
balancing_done = True
logger.info('BALANCE ACHIEVED')
logger.debug('Num particles are : %s'%(self.particles_per_proc))
continue
logger.info('particle_counts: %r: %r'%(self.prev_particle_count,
self.particles_per_proc))
if self.particles_per_proc == self.prev_particle_count:
# meaning that the previous load balancing iteration did not
# change the particle counts, we do not do anything now.
balancing_done = True
logger.info('Load unchanged')
continue
logger.debug('Total particles : %d'%(self.total_particles))
logger.debug('Ideal load : %d'%(self.ideal_load))
logger.debug('Load DIfference : %s'%(self.load_difference))
logger.info('Particle counts : %s'%(self.particles_per_proc))
logger.debug('Threshold margin: %f'%(self.threshold_margin))
logger.debug('Upper threshold : %f'%(self.upper_threshold))
logger.debug('Lower threshold : %f'%(self.lower_threshold))
self.block_proc = self.cell_manager.proc_map.block_map
# store the old particle counts in prev_particle_count
self.prev_particle_count[:] = self.particles_per_proc
if min(self.particles_per_proc) == 0:
self.load_balance_with_zero_procs()
else:
self.load_balance_normal()
# update the cell information.
self.cell_manager.remove_remote_particles()
self.cell_manager.delete_empty_cells()
self.cell_manager.rebin_particles()
self.proc_map.glb_update_proc_map(self.cell_manager.cells_dict)
#assert len(self.proc_map.conflicts) == 0
#recv_particles = self.proc_map.resolve_procmap_conflicts({})
self.proc_map.find_region_neighbors()
#self.cell_manager.add_entering_particles_from_neighbors(recv_particles)
self.comm.Barrier()
current_balance_iteration += 1
def collect_num_particles(self):
""" Finds the number of particles with each processor.
**Algorithm**
- gather each processors particle count at the root.
- scatter this data to all processors.
"""
arrays = self.cell_manager.arrays_to_bin
num_particles = sum(map(ParticleArray.get_number_of_particles, arrays))
particles_per_proc = self.comm.gather(num_particles, root=0)
# now num_particles has one entry for each processor, containing the
# number of particles with each processor. broadcast that data to all
# processors.
particles_per_proc = self.comm.bcast(particles_per_proc, root=0)
return particles_per_proc
def load_balance_normal(self):
""" The normal diffusion based load balance algorithm. """
self.procs_to_communicate = self._get_procs_to_communicate(
self.particles_per_proc,
self.cell_manager.proc_map.nbr_procs)
num_procs = len(self.procs_to_communicate)
# PASS 1
num_procs = len(self.procs_to_communicate)
i = num_procs - 1
pid = self.procs_to_communicate[i]
while pid != self.pid:
self.normal_lb_pass1(pid)
i -= 1
pid = self.procs_to_communicate[i]
# PASS 2
i = 0
pid = self.procs_to_communicate[i]
while pid != self.pid:
self.normal_lb_pass2(pid)
i += 1
pid = self.procs_to_communicate[i]
def load_balance_with_zero_procs(self):
""" Balances load when there are some processes with no particles.
**Idea**
If a process has zero particles, it requests the process with the
highest number of particles(at the start of the algorithm) for
particles. The process may or may not donate particles. If the zero
particle proc gets particles from this process, it will send empty
requests to the rest of the non-zero particle procs. Each zero particle
proc does this until it finds the first process ready to donate
particles.
**Algorithm**
- if process is zero particle proc, then starting with the proc
having highest number of proc start requesting all other procs,
till another zero particle proc is reached.
- if process is non-zero particle proc, then starting with the first
proc having zero particles, respond to requests from each proc.
"""
num_procs = self.num_procs
self.procs_to_communicate = self._get_procs_to_communicate(
self.particles_per_proc,
range(self.num_procs))
if self.particles_per_proc[self.pid] == 0:
self._zero_request_particles()
else:
self._zero_donate_particles()
def _get_procs_to_communicate(self, particles_per_proc, procs_to_communicate):
"""
Returns the list of procs in correct order to communicate with during
load balancing. The procs will be same as in the list
procs_to_communicate but will be ordered properly in order to avoid any
deadlocks.
The returned proc list will have process id's sorted in increasing order
of the number of particles in them. In case of ties, lower process id
will appear before a higher process id.
**Parameters**
- particles_per_proc - the number of particles with EVERY processor
in the world.
- procs_to_communicate - list of procs to communicate with while
load balancing.
"""
proc_order = list(numpy.argsort(particles_per_proc, kind='mergesort'))
for i in range(len(proc_order)-1):
if particles_per_proc[proc_order[i]] ==\
particles_per_proc[proc_order[i+1]]:
if proc_order[i] > proc_order[i+1]:
# swap the two
temp = proc_order[i]
proc_order[i] = proc_order[i+1]
proc_order[i+1] = temp
# select from this sorted order, the procs in procs_to_communicate.
output_procs = []
for proc in proc_order:
if procs_to_communicate.count(proc) == 1:
output_procs.append(proc)
return output_procs
def normal_lb_pass1(self, pid):
""" Request processors having more particles than self to donate
**Algorithm**
- send request for particles to pid.
- recv reply.
- depending on reply add new particles to self.
**Data sent/received**
- check if we need more particles.
- if yes
- send a dictionary in the format given below.
- receive a dictionary of blocks with particles for them, the
dictionary could be empty.
- add particles received in the particle arrays as real
particles.
- if no
- send a dictionary in the format given below.
- receive an empty dictionary.
"""
logger.debug('Requesting %d for particles'%(pid))
send_data = self._build_particle_request()
self.comm.send(send_data, dest=pid, tag=TAG_LB_PARTICLE_REQUEST)
data = self.comm.recv(source=pid, tag=TAG_LB_PARTICLE_REPLY)
particle_data = data['particles']
self.cell_manager.add_local_particles_to_parray(particle_data)
logger.debug('req recvd: DONE with recv: %r'%data)
def normal_lb_pass2(self, pid):
""" Process requests from processors with lesser particles than self.
Algorithm:
----------
- recv request from pid.
- if pid requested particles
- check if we have particles enough to give.
- if yes, choose an appropriate block(s), extract particles and send.
"""
logger.debug('Processing request from %d'%(pid))
comm = self.comm
arrays = self.cell_manager.arrays_to_bin
num_particles = sum(map(ParticleArray.get_number_of_particles, arrays))
request = comm.recv(source=pid, tag=TAG_LB_PARTICLE_REQUEST)
reply = self._build_particle_request_reply(request, pid)
comm.send(reply, dest=pid, tag=TAG_LB_PARTICLE_REPLY)
logger.debug('process request DONE with reply: %r'%reply)
def _build_particle_request(self):
""" Build the dictionary to be sent as a particle request. """
arrays = self.cell_manager.arrays_to_bin
num_particles = sum(map(ParticleArray.get_number_of_particles, arrays))
data = {}
if num_particles < self.ideal_load:
data['need_particles'] = True
data['num_particles'] = num_particles
else:
data['need_particles'] = False
return data
def _build_particle_request_reply(self, request, pid):
""" Build the reply to be sent in response to a request. """
arrays = self.cell_manager.arrays_to_bin
num_particles = sum(map(ParticleArray.get_number_of_particles, arrays))
reply = {}
if request['need_particles'] == False:
logger.debug('%d request for NO particles'%(pid))
reply['particles'] = {}
return reply
num_particles_in_pid = request['num_particles']
# check if pid has more particles than us.
if num_particles_in_pid >= num_particles:
logger.debug('%d has more particles that %d'%(pid, self.pid))
reply['particles'] = {}
return reply
# if our number of particles is within the threshold, do not donate
# particles.
if abs(self.ideal_load-num_particles) < self.threshold_margin:
if (not (num_particles-num_particles_in_pid) >
self.threshold_margin):
logger.debug('Need not donate - not overloaded')
reply['particles'] = {}
return reply
# if we have only one block, do not donate.
if len(self.cell_manager.proc_map.local_block_map) == 1:
logger.debug('Have only one block - will not donate')
reply['particles'] = {}
return reply
# get one or more blocks to send to pid
data = self._get_particles_for_neighbor_proc(pid)
reply['particles'] = data
return reply
def _get_particles_for_neighbor_proc(self, pid):
""" Returns particles (in blocks) to be moved to pid for processing """
self.block_nbr_proc = self.construct_nbr_block_info(self.block_proc)
# get one or more blocks to send to pid
pidr = self.pid
if self.adaptive:
num_iters = 10
else:
num_iters = 1
blocks = []
for i in range(num_iters):
np = self.particles_per_proc[pidr]
npr = self.particles_per_proc[pid]
if np <= npr or np < self.ideal_load-self.threshold_margin/2 or npr >= self.ideal_load+self.threshold_margin/2:
np_reqd = 0
break
else:
mean = (np+npr)/2
if mean < self.ideal_load-self.threshold_margin/2:
np_reqd = np-self.ideal_load+self.threshold_margin/2
elif mean > self.ideal_load+self.threshold_margin/2:
np_reqd = np-self.ideal_load-self.threshold_margin/2
else:
np_reqd = np - mean
if self.adaptive:
blk = self._get_blocks_for_neighbor_proc2(pid, pidr, self.proc_block_np[pidr], np_reqd)
else:
blk = self._get_blocks_for_neighbor_proc(pid, self.proc_block_np[pidr])
for bid in blk:
self._update_block_pid_info(bid, pidr, pid)
blocks.extend(blk)
#blocks_for_nbr = self._get_blocks_for_neighbor_proc(pid,
# self.proc_map.local_block_map,
# self.block_nbr_proc)
blocks_for_nbr = blocks
block_dict = {}
for bid in blocks_for_nbr:
block_dict[bid] = []
for cid in self.proc_map.cell_map[bid]:
block_dict[bid].append(self.cell_manager.cells_dict[cid])
del self.proc_map.cell_map[bid]
if block_dict:
# if all blocks are being sent away, keep the last cid with self
if len(block_dict) == len(self.proc_map.local_block_map):
del block_dict[bid]
particles = self.cell_manager.create_new_particle_copies(block_dict)
else:
logger.debug('No blocks found for %d'%(pid))
particles = {}
return particles
def _zero_request_particles(self):
""" Requests particles from processors with some particles. """
arrays = self.cell_manager.arrays_to_bin
comm = self.comm
i = self.num_procs - 1
req = {}
done = False
while i > 0 and done == False:
pid = self.procs_to_communicate[i]
np = self.particles_per_proc[pid]
if np == 0:
done = True
continue
num_particles = sum(map(ParticleArray.get_number_of_particles, arrays))
req['num_particles'] = num_particles
if num_particles > 0:
req['need_particles'] = False
else:
req['need_particles'] = True
comm.send(req, dest=pid, tag=TAG_LB_PARTICLE_REQUEST)
data = comm.recv(source=pid, tag=TAG_LB_PARTICLE_REPLY)
# add the particles in the parray
particles = data['particles']
self.cell_manager.add_local_particles_to_parray(particles)
i -= 1
def _zero_donate_particles(self):
""" Respond to a request for particles from a zero particle process. """
comm = self.comm
i = 0
reply = {}
done = False
while i < self.num_procs and done == False:
pid = self.procs_to_communicate[i]
np = self.particles_per_proc[pid]
if np > 0:
done = True
continue
# receive the request from pid
req = comm.recv(source=pid, tag=TAG_LB_PARTICLE_REQUEST)
reply = self._process_zero_proc_request(pid, req)
comm.send(reply, dest=pid, tag=TAG_LB_PARTICLE_REPLY)
i += 1
def _process_zero_proc_request(self, pid, request):
""" Construct reply for request from process with no particles """
if request['need_particles'] == False:
return {'particles':{}}
num_particles_with_pid = request['num_particles']
if num_particles_with_pid > 0:
logger.warn('Invalid request from %d'%(pid))
return {'particles':{}}
particles = self._get_boundary_blocks_to_donate(pid)
return {'particles':particles}
def _get_boundary_blocks_to_donate(self, pid):
""" Get boundary blocks to be donated to proc with no particles. """
self.block_nbr_proc = self.construct_nbr_block_info(self.block_proc)
blocks_for_proc = self._get_blocks_for_zero_proc(pid,
self.proc_map.local_block_map,
self.block_nbr_proc)
block_dict = {}
for bid in blocks_for_proc:
block_dict[bid] = []
for cid in self.proc_map.cell_map[bid]:
block_dict[bid].append(self.cell_manager.cells_dict[cid])
del self.proc_map.cell_map[bid]
del self.proc_map.local_block_map[bid]
if block_dict:
# if all blocks are being sent away, keep the last cid with self
if len(block_dict) == len(self.proc_map.local_block_map):
del block_dict[bid]
particles = self.cell_manager.create_new_particle_copies(block_dict)
else:
logger.debug('No blocks found for %d'%(pid))
particles = {}
return particles
def calc_load_thresholds(self, particles_per_proc):
self.total_particles = sum(self.particles_per_proc)
self.ideal_load = float(self.total_particles) / self.num_procs
self.threshold_margin = self.ideal_load * self.threshold_ratio / 100.
self.lower_threshold = self.ideal_load - self.threshold_margin
self.upper_threshold = self.ideal_load + self.threshold_margin
for i in range(self.num_procs):
self.load_difference[i] = (self.particles_per_proc[i] -
self.ideal_load)
def load_balance_func_serial(self, distr_func='single', **args):
    """ Perform load balancing serially by gathering all data on root proc

    **Algorithm**

    - gather per-block particle counts on root
    - on root, iterate `load_balance_serial_iter` (which delegates to the
      chosen `load_redistr_<distr_func>` method) until balanced or
      `lb_max_iterations` is reached
    - scatter the resulting send/recv lists and transfer the particles
      (`redistr_cells` -- a collective, every proc participates)
    - rebin particles, update the processor map and neighbor info
    - BARRIER.
    """
    # resolve e.g. 'single' -> self.load_redistr_single
    redistr_func = getattr(self, 'load_redistr_'+distr_func)
    self.balancing_done = False
    current_balance_iteration = 0
    self.load_difference = [0] * self.num_procs
    # collective: fills self.proc_block_np / self.particles_per_proc
    self._gather_block_particles_info()
    # snapshot the current bid -> pid ownership
    old_distr = {}
    for proc_no, cells in enumerate(self.proc_block_np):
        for cellid in cells:
            old_distr[cellid] = proc_no
    self.old_distr = old_distr
    self.block_proc = {}
    self.block_proc.update(old_distr)
    #print '(%d)'%self.pid, self.block_proc
    self.block_nbr_proc = self.construct_nbr_block_info(self.block_proc)
    # only root (pid 0) runs the balancing iterations
    while self.balancing_done == False and self.pid == 0:
        logger.info('Load Balance iteration %d -------------------' % (
                current_balance_iteration))
        if current_balance_iteration >= self.lb_max_iterations:
            self.balancing_done = True
            logger.info('MAX LB ITERATIONS EXCEEDED')
            continue
        self.load_balance_serial_iter(redistr_func, **args)
        current_balance_iteration += 1
    # do the actual transfer of particles now (collective)
    self.redistr_cells(self.old_distr, self.block_proc)
    logger.info('load distribution : %r : %r'%(set(self.block_proc.values()),
                                               self.particles_per_proc))
    # update the cell information.
    self.cell_manager.remove_remote_particles()
    self.cell_manager.delete_empty_cells()
    self.cell_manager.rebin_particles()
    self.proc_map.glb_update_proc_map(self.cell_manager.cells_dict)
    #assert len(self.proc_map.conflicts) == 0
    #recv_particles = self.proc_map.resolve_procmap_conflicts({})
    self.proc_map.find_region_neighbors()
    #self.cell_manager.add_entering_particles_from_neighbors(recv_particles)
    logger.info('waiting for lb to finish')
    self.comm.Barrier()
    if logger.getEffectiveLevel() <= 20: # only for level <= INFO
        # summarize the final local distribution for diagnostics
        cell_np = {}
        np = 0
        for cellid, cell in self.cell_manager.cells_dict.items():
            cell_np[cellid] = cell.get_number_of_particles()
            np += cell_np[cellid]
        logger.info('(%d) %d particles in %d cells' % (self.pid, np, len(cell_np)))
def load_balance_serial_iter(self, redistr_func, **args):
    """ A single iteration of serial load balancing.

    Sets self.balancing_done when the load is within threshold or when
    the previous iteration made no progress; otherwise delegates the
    redistribution to `redistr_func`, which returns the new
    (block_proc, particles_per_proc) pair.
    """
    # get the number of particles with each process.
    #self.particles_per_proc = self.collect_num_particles()
    self.calc_load_thresholds(self.particles_per_proc)
    min_diff = min(self.load_difference)
    max_diff = max(self.load_difference)
    # balanced when every proc deviates less than the margin from ideal
    if (abs(min_diff) < self.threshold_margin and max_diff <
            self.threshold_margin):
        self.balancing_done = True
        logger.info('BALANCE ACHIEVED')
        logger.debug('Num particles are : %s' % (self.particles_per_proc))
        return
    if self.particles_per_proc == self.prev_particle_count and self.pid == 0:
        # meaning that the previous load balancing iteration did not
        # change the particle counts, we do not do anything now.
        self.balancing_done = True
        logger.info('Load unchanged')
        return
    if logger.getEffectiveLevel() <= 20: # only for level <= INFO
        logger.debug('Total particles : %d' % (self.total_particles))
        logger.debug('Ideal load : %d' % (self.ideal_load))
        logger.debug('Load Difference : %s' % (self.load_difference))
        logger.info('Particle counts : %s' % (self.particles_per_proc))
        logger.debug('Threshold margin: %f' % (self.threshold_margin))
        logger.debug('Upper threshold : %f' % (self.upper_threshold))
        logger.debug('Lower threshold : %f' % (self.lower_threshold))
    if not self.balancing_done:
        # store the old particle counts in prev_particle_count
        self.prev_particle_count[:] = self.particles_per_proc
        self.block_proc, self.particles_per_proc = redistr_func(
            self.block_proc, self.proc_block_np, **args)
def _gather_block_particles_info(self):
    """ Gather per-block particle counts from all procs onto root.

    Collective over self.comm.  Sets self.block_np (local bid -> count),
    self.proc_block_np (list of such dicts, one per proc; only valid on
    root, empty elsewhere) and self.particles_per_proc (broadcast to
    all procs).
    """
    self.particles_per_proc = [0] * self.num_procs
    # count real particles per local block
    block_np = {}
    for bid, cells in self.cell_manager.proc_map.cell_map.iteritems():
        block_np[bid] = 0
        for cid in cells:
            block_np[bid] += self.cell_manager.cells_dict[cid].get_number_of_particles()
    self.block_np = block_np
    # gather returns None on non-root procs
    self.proc_block_np = self.comm.gather(block_np, root=0)
    #print '(%d)'%self.pid, self.proc_block_np
    if self.proc_block_np is None:
        self.proc_block_np = []
    for i, c in enumerate(self.proc_block_np):
        for cnp in c.values():
            self.particles_per_proc[i] += cnp
    logger.debug('(%d) %r' %(self.pid, self.particles_per_proc))
    # everyone gets root's totals
    self.particles_per_proc = self.comm.bcast(self.particles_per_proc, root=0)
    logger.debug('(%d) %r' % (self.pid, self.particles_per_proc))
def redistr_cells(self, old_distr, new_distr):
    """ Redistribute blocks among the procs as per `new_distr`, using
    `old_distr` to determine the incremental data to be communicated.

    Collective: root computes the num_procs x num_procs send/recv block
    lists and scatters them; every proc then exchanges particles with
    its peers in a deadlock-free order (higher pid receives first from
    lower pids, lower pid sends first to higher pids).

    `old_distr` and `new_distr` are dicts of bid:pid, used only on root,
    and need only contain the changed blocks.
    """
    logger.debug('redistributing blocks')
    r = range(self.num_procs)
    # sends[i][j] = blocks proc i must send to proc j (and symmetrically
    # for recvs).  BUGFIX (clarity): the original wrote
    # `[[] * self.num_procs for i in r]` -- `[] * n` is just `[]`, so the
    # multiplication was a misleading no-op; also use the module logger
    # instead of the root `logging` functions for consistency.
    sends = [[[] for i in r] for j in r]
    recvs = [[[] for i in r] for j in r]
    for bid, opid in old_distr.iteritems():
        npid = new_distr[bid]
        if opid != npid:
            recvs[npid][opid].append(bid)
            sends[opid][npid].append(bid)
    sends = self.comm.scatter(sends, root=0)
    recvs = self.comm.scatter(recvs, root=0)
    # now each proc has all the blocks it needs to send/recv from other procs
    logger.debug('sends' + str([len(i) for i in sends]))
    logger.debug('recvs' + str([len(i) for i in recvs]))
    # greater pid will recv first
    for i in range(self.pid):
        self.recv_particles(recvs[i], i)
        self.send_particles(sends[i], i)
    # smaller pid will send first
    for i in range(self.pid + 1, self.num_procs):
        self.send_particles(sends[i], i)
        self.recv_particles(recvs[i], i)
    logger.debug('redistribution of blocks done')
def load_redistr_single(self, block_proc=None, proc_block_np=None,
                        adaptive=False, **args):
    """ The load balance algorithm running entirely on the root proc.

    Same pairing scheme as the distributed algorithm: procs are ordered
    by `_get_procs_to_communicate` and each needy proc `pid` is paired
    with overloaded procs `pidr` from the other end of the ordering;
    `single_lb_transfer_blocks` reassigns blocks between each pair.
    Zero-particle procs get a dedicated pass.  Returns the updated
    (block_proc, particles_per_proc).
    """
    self.adaptive = adaptive
    self.procs_to_communicate = self._get_procs_to_communicate(
        self.particles_per_proc, range(self.num_procs))
    #self.procs_to_communicate = numpy.argsort(self.particles_per_proc)[::-1]
    num_procs = len(self.procs_to_communicate)
    if self.particles_per_proc[self.procs_to_communicate[-1]] == 0:
        # load balancing with zero_procs
        for i in range(num_procs):
            pid = self.procs_to_communicate[i]
            for j in range(num_procs-i):
                # stop once pid has received its first block
                if self.particles_per_proc[pid] != 0:
                    break
                pidr = self.procs_to_communicate[-j-1]
                self.single_lb_transfer_blocks(pid, pidr)
    else:
        # pass1 pid = pid, pass2 pid = pidr
        for i in range(num_procs):
            pid = self.procs_to_communicate[i]
            for j in range(num_procs-i):
                pidr = self.procs_to_communicate[-j-1]
                self.single_lb_transfer_blocks(pid, pidr)
    logger.debug('load_redistr_single done')
    return self.block_proc, self.particles_per_proc
def load_redistr_auto(self, block_proc=None, proc_block_np=None, **args):
    """ Load redistribution by automatic selection of method.

    If only one proc has all the particles, use the
    `load_redistr_geometric` method (one-shot geometric split), else use
    `load_redistr_single` (iterative block transfers).
    """
    non_zeros = len([1 for p in self.particles_per_proc if p > 0])
    if non_zeros == 1:
        logger.info('load_redistr_auto: geometric')
        block_proc, np_per_proc = self.load_redistr_geometric(self.block_proc,
                                                        self.proc_block_np)
        # geometric split is a first guess -- keep iterating afterwards
        self.balancing_done = False
        self.block_nbr_proc = self.construct_nbr_block_info(block_proc)
        # rebuild self.proc_block_np to match the new ownership
        block_np = {}
        for proc,c_np in enumerate(self.proc_block_np):
            block_np.update(c_np)
        self.proc_block_np = [{} for i in range(self.num_procs)]
        for cid,pid in block_proc.iteritems():
            self.proc_block_np[pid][cid] = block_np[cid]
        return block_proc, np_per_proc
    else:
        logger.info('load_redistr_auto: serial')
        return self.load_redistr_single(self.block_proc, self.proc_block_np,
                                        **args)
def single_lb_transfer_blocks(self, pid, pidr):
    """ Allocate blocks (particles) from proc `pidr` to proc `pid` on the
    root proc, updating the bookkeeping via `_update_block_pid_info`.

    Returns the list of block ids moved (possibly empty).
    """
    num_particles = self.particles_per_proc[pid]
    # pid needs particles only when it is below the ideal load.
    # BUGFIX: the original if/else assigned True on *both* branches,
    # which made the `need_particles == False` early exit below dead
    # code; the else branch clearly should have been False.
    need_particles = num_particles < self.ideal_load
    num_particlesr = self.particles_per_proc[pidr]
    if num_particles == 0 and num_particlesr > 1:
        # send a boundary block to the empty proc
        blocks = self._get_blocks_for_zero_proc(pid, self.proc_block_np[pidr])
        for bid in blocks:
            self._update_block_pid_info(bid, pidr, pid)
        return blocks
    logger.debug('%d %d %d %d transfer' % (pid, num_particles, pidr, num_particlesr))
    if need_particles == False:
        logger.debug('%d request for NO particles' % (pid))
        return []
    # check if pid has more particles than pidr
    if num_particles >= num_particlesr:
        logger.debug('%d has more particles than %d' % (pid, pidr))
        return []
    # if number of particles in pidr is within the threshold, do not
    # donate particles
    if abs(self.ideal_load - num_particlesr) < self.threshold_margin:
        if (not (num_particlesr - num_particles) > self.threshold_margin):
            logger.debug('Need not donate - not overloaded')
            return []
    # if pidr has only one block, do not donate
    if len(self.proc_block_np[pidr]) == 1:
        logger.debug('Have only one block - will not donate')
        return []
    # get one or more blocks to send to pid; the adaptive scheme keeps
    # transferring until the pair is roughly balanced
    if self.adaptive:
        num_iters = 10
    else:
        num_iters = 1
    blocks = []
    for i in range(num_iters):
        np = self.particles_per_proc[pidr]
        npr = self.particles_per_proc[pid]
        if np <= npr or np < self.ideal_load-self.threshold_margin/2 or npr >= self.ideal_load+self.threshold_margin/2:
            np_reqd = 0
            break
        else:
            # number of particles to move so both end near the ideal load
            mean = (np+npr)/2
            if mean < self.ideal_load-self.threshold_margin/2:
                np_reqd = np-self.ideal_load+self.threshold_margin/2
            elif mean > self.ideal_load+self.threshold_margin/2:
                np_reqd = np-self.ideal_load-self.threshold_margin/2
            else:
                np_reqd = np - mean
            if self.adaptive:
                blk = self._get_blocks_for_neighbor_proc2(pid, pidr, self.proc_block_np[pidr], np_reqd)
            else:
                blk = self._get_blocks_for_neighbor_proc(pid, self.proc_block_np[pidr])
            for bid in blk:
                self._update_block_pid_info(bid, pidr, pid)
            blocks.extend(blk)
    return blocks
def recv_particles(self, blocks, pid):
    """ Receive the particles of `blocks` from proc `pid` and add them
    to the local particle arrays. """
    if len(blocks) == 0:
        # nothing to transfer -- skip the communication entirely
        return
    logger.debug('Receiving particles in %d blocks from %d' % (len(blocks), pid))
    incoming = self.comm.recv(source=pid, tag=TAG_LB_PARTICLE_REPLY)
    self.cell_manager.add_local_particles_to_parray(incoming)
    logger.debug('Received particles from %d' % (pid))
def send_particles(self, blocks, pid):
    """ Send the particles contained in `blocks` to proc `pid`. """
    if len(blocks) == 0:
        # nothing to transfer -- skip the communication entirely
        return
    logger.debug('Sending particles in %d blocks to %d' % (len(blocks), pid))
    outgoing = self._build_particles_to_send_from_blocks(blocks, pid)
    self.comm.send(outgoing, dest=pid, tag=TAG_LB_PARTICLE_REPLY)
    logger.debug('Sent particles to %d' % (pid))
def _build_particles_to_send_from_blocks(self, blocks, pid):
    """
    Build the reply to be sent in response to a request.

    Collects every cell of every block in `blocks` and returns the
    particle copies (as produced by the cell manager) to be moved to
    `pid` for processing.
    """
    cell_dict = {}
    for bid in blocks:
        for cid in self.cell_manager.proc_map.cell_map[bid]:
            cell = self.cell_manager.cells_dict[cid]
            # create_new_particle_copies expects cid -> list of cells
            cell_dict[cid] = [cell]
    particles = self.cell_manager.create_new_particle_copies(cell_dict)
    return particles
def _get_blocks_for_zero_proc(self, pid, blocks, block_nbr_proc=None):
    """ Pick a block to donate to the empty proc `pid`.

    `blocks` is the sequence of candidate block ids.  Returns (as a
    one-element list) the candidate with the greatest number of empty
    neighbor slots (key -1 in its neighbor-count dict), i.e. the most
    exposed boundary block; [] when `blocks` is empty.  Ties keep the
    first candidate encountered.
    """
    nbr_info = self.block_nbr_proc if block_nbr_proc is None else block_nbr_proc
    best = []
    best_empty = -1
    for bid in blocks:
        empty_slots = nbr_info[bid].get(-1, 0)
        if empty_slots > best_empty:
            best_empty = empty_slots
            best = [bid]
    return best
def _get_blocks_for_neighbor_proc(self, pid, blocks, block_nbr_proc=None):
    """ Return blocks to be sent to neighbor proc `pid`.

    Parameters:
    - `blocks` -- sequence of candidate block ids to choose from
    - `block_nbr_proc` -- (default self.block_nbr_proc) dict mapping bid
      to a {pid: num_neighbor_blocks} dict as returned by
      `construct_nbr_block_info()`

    Of the candidates that have at least one remote neighbor and at
    least one neighbor owned by `pid`, returns all blocks tied for the
    maximum number of neighbors in `pid` (minimum count considered: 1).
    """
    if block_nbr_proc is None:
        block_nbr_proc = self.block_nbr_proc
    best_count = 1
    chosen = []
    for bid in blocks:
        info = block_nbr_proc[bid]
        owner = self.block_proc[bid]
        local_cnt = info.get(owner, 0)
        # 26 possible neighbors in 3D; subtract empty and same-proc ones
        remote_cnt = 26 - info.get(-1, 0) - local_cnt
        if remote_cnt == 0:
            #logger.debug('%s has no remote nbrs'%(cid))
            continue
        in_pid = info.get(pid)
        if not in_pid:
            continue
        if in_pid > best_count:
            best_count = in_pid
            chosen = [bid]
        elif in_pid == best_count:
            chosen.append(bid)
    if not chosen:
        logger.debug('No blocks found for %d' % (pid))
    return chosen
def _get_blocks_for_neighbor_proc2(self, pid, pidr, blocks, np_reqd, block_nbr_proc=None):
    """ Return blocks to be sent from proc `pidr` to neighbor proc `pid`,
    transferring at most `np_reqd` particles.

    Scores the blocks with the most neighbors in `pid` (favoring many
    remote / few local neighbors and distance from the donor's centroid)
    and greedily takes the best-scored blocks until `np_reqd` particles
    are covered or the score drops more than 2 below the maximum.
    """
    if block_nbr_proc is None:
        block_nbr_proc = self.block_nbr_proc
    blocks_for_nbr = []
    block_score = {}
    # get score for each block
    x = y = z = 0 # for centroid of blocks
    max_neighbor_count = 0
    for bid in blocks:
        bpid = self.block_proc[bid]
        local_nbr_count = block_nbr_proc[bid].get(bpid, 0)
        # 26 possible neighbors in 3D, minus empty and same-proc ones
        remote_nbr_count = 26 - block_nbr_proc[bid].get(-1, 0) - local_nbr_count
        num_nbrs_in_pid = block_nbr_proc[bid].get(pid, 0)
        # keep scores only for blocks tied at the current maximum count
        if max_neighbor_count < num_nbrs_in_pid:
            max_neighbor_count = num_nbrs_in_pid
            block_score.clear()
            block_score[bid] = 2*num_nbrs_in_pid + remote_nbr_count - local_nbr_count
        elif max_neighbor_count == num_nbrs_in_pid:
            block_score[bid] = 2*num_nbrs_in_pid + remote_nbr_count - local_nbr_count
        x += bid.x
        y += bid.y
        z += bid.z
    if max_neighbor_count == 0:
        # no candidate touches pid at all
        return []
    num_blocks = float(len(blocks))
    x /= num_blocks
    y /= num_blocks
    z /= num_blocks
    # bias the score by distance from the donor's centroid so peripheral
    # blocks are donated first
    block_dist = {}
    for bid in blocks:
        block_dist[bid] = ((bid.x-x)**2+(bid.y-y)**2+(bid.z-z)**2)**0.5
    mean_dist = numpy.average(block_dist.values())
    for bid in block_score:
        block_score[bid] += block_dist[bid] / mean_dist
    # allocate block for neighbor
    sblocks = sorted(block_score, key=block_score.get, reverse=True)
    particles_send = 0
    block_np = self.proc_block_np[pidr]
    max_score = block_score[sblocks[0]]
    #print block_np
    for bid in sblocks:
        #if max_neighbor_count > block_nbr_proc[bid].get(pid, 0):
        #    continue
        particles_send += block_np[bid]
        if particles_send > np_reqd or block_score[bid] < max_score-2:
            particles_send -= block_np[bid]
            break
        blocks_for_nbr.append(bid)
    if not blocks_for_nbr:
        logger.debug('No blocks found for %d' % (pid))
    return blocks_for_nbr
@classmethod
def construct_nbr_block_info(self, block_proc, nbr_for_blocks=None):
    """ Construct and return the dict of bid:{pid:nnbr} having the
    neighbor pid information for each block.

    `block_proc` maps bid -> owning pid.  If `nbr_for_blocks` is given
    (a sequence of blocks), only those blocks' neighbors are computed;
    otherwise every block in `block_proc` is processed.  Neighbors with
    no entry in `block_proc` are counted under the special pid -1.

    NOTE: declared @classmethod; the first parameter is conventionally
    named `self` but receives the class.
    """
    block_nbr_proc = {} # bid:{pid:nnbr}
    if nbr_for_blocks is None:
        nbr_for_blocks = block_proc
    for bid in nbr_for_blocks:
        nbrs = []
        nbrcnt = {}
        # fills `nbrs` with the immediate (26 in 3D) neighbor block ids
        py_construct_immediate_neighbor_list(bid, nbrs, False)
        for nbr in nbrs:
            p = block_proc.get(nbr, -1) # -1 is count of missing neighbors
            nbrcnt[p] = nbrcnt.get(p, 0) + 1
        block_nbr_proc[bid] = nbrcnt
    return block_nbr_proc
def _update_block_pid_info(self, bid, old_pid, new_pid):
    """ Update the bookkeeping after block `bid` moves from `old_pid` to
    `new_pid`.

    Adjusts self.block_proc, the neighbor counts in self.block_nbr_proc
    (for every neighbor that has an entry), self.proc_block_np and
    self.particles_per_proc.
    """
    #print bid, old_pid, new_pid
    self.block_proc[bid] = new_pid
    block_nbr_proc = self.block_nbr_proc
    nbrs = []
    py_construct_immediate_neighbor_list(bid, nbrs, False)
    for nbr in nbrs:
        nbr_info = block_nbr_proc.get(nbr)
        if nbr_info is not None:
            # bid's owner changed: one fewer old_pid neighbor, one more
            # new_pid neighbor for each adjacent block
            nbr_info[old_pid] -= 1
            nbr_info[new_pid] = nbr_info.get(new_pid, 0) + 1
    self.proc_block_np[new_pid][bid] = self.proc_block_np[old_pid][bid]
    del self.proc_block_np[old_pid][bid]
    block_np = self.proc_block_np[new_pid][bid]
    self.particles_per_proc[old_pid] -= block_np
    self.particles_per_proc[new_pid] += block_np
###########################################################################
# simple method to assign some blocks to all procs based on geometry
# subdivision. The distribution is unsuitable as a load balancer,
# but may provide a good way to initiate load balancing
###########################################################################
def load_redistr_geometric(self, block_proc, proc_block_np, allow_zero=False, **args):
    """ Distribute the blocks over the procs by a simple geometric
    subdivision of the domain (see `distribute_particles_geometric`).

    Merges the per-proc bid -> num_particles dicts, performs the
    geometric split, marks balancing as done and returns the new
    (block_proc, per-proc particle counts).
    """
    num_procs = len(proc_block_np)
    # merge the per-proc dicts into one global bid -> np map
    merged_np = {}
    for per_proc in proc_block_np:
        merged_np.update(per_proc)
    proc_blocks, proc_num_particles = self.distribute_particles_geometric(
        merged_np, num_procs, allow_zero)
    self.balancing_done = True
    return self.get_block_proc(proc_blocks=proc_blocks), proc_num_particles
@staticmethod
def get_distr_sizes(l=1., b=1., h=1., num_procs=12):
"""return the number of clusters to be generated along each dimension
l,b,h are the size of the domain
return: s = ndarray of size 3 = number of divisions along each dimension
s[0]*s[1]*s[2] >= num_procs"""
x = numpy.array([l,b,h], dtype='float')
compprod = numpy.cumprod(x)[-1]
fac = (float(num_procs)/compprod)**(1.0/3)
s = x*fac
s = numpy.ceil(s)
cont = True
while cont:
ss = numpy.argsort(s)
if (s[ss[2]]-1)*(s[ss[1]])*(s[ss[0]]) >= num_procs:
s[ss[2]] -= 1
continue
elif (s[ss[2]])*(s[ss[1]]-1)*(s[ss[0]]) >= num_procs:
s[ss[1]] -= 1
continue
elif (s[ss[2]])*(s[ss[1]])*(s[ss[0]]-1) >= num_procs:
s[ss[0]] -= 1
continue
else:
cont = False
#print 'sizes: %s'%(str(s))
#print distortion(*s/x)
return s
@staticmethod
def distribute_particles_geometric(block_np, num_procs, allow_zero=False):
    """ Distribute the blocks in `block_np` (bid -> num_particles) over
    `num_procs` procs by geometric subdivision.

    **Algorithm**

    # get the division counts per dimension from `get_distr_sizes()`
      based on the bounding box of the block ids
    # divide the domain into a rectangular grid of regions, one per proc
      (doubling up along the smallest dimension to absorb the deficit
      when the grid has more regions than procs)
    # assign each block to the proc owning its region
    # unless `allow_zero`, split blocks from the most loaded procs among
      any procs that ended up empty

    Returns (proc_blocks, proc_num_particles).
    """
    num_blocks = len(block_np)
    # block ids expose integer .x/.y/.z grid coordinates
    block_arr = numpy.empty((num_blocks, 3))
    num_particles_arr = numpy.empty((num_blocks,), dtype='int')
    for i,block_id in enumerate(block_np):
        block_arr[i,0] = block_id.x
        block_arr[i,1] = block_id.y
        block_arr[i,2] = block_id.z
        num_particles_arr[i] = block_np[block_id]
    np_per_proc = sum(num_particles_arr)/num_procs
    lmin = numpy.min(block_arr[:,0])
    bmin = numpy.min(block_arr[:,1])
    hmin = numpy.min(block_arr[:,2])
    # range of blocks in each dimension
    l = numpy.max(block_arr[:,0])+1 - lmin
    b = numpy.max(block_arr[:,1])+1 - bmin
    h = numpy.max(block_arr[:,2])+1 - hmin
    # distribution sizes in each dimension
    s = LoadBalancer.get_distr_sizes(l,b,h,num_procs)
    # extent of one region along each dimension
    ld = l/s[0]
    bd = b/s[1]
    hd = h/s[2]
    # allocate regions to procs
    # deficit of actual processes to allocate
    deficit = int(numpy.cumprod(s)[-1] - num_procs)
    # sorted s
    ss = numpy.argsort(s)
    # reversed dict (value to index)
    rss = numpy.empty(len(ss), dtype='int')
    for i,si in enumerate(ss):
        rss[si] = i
    proc = 0
    proc_blocks = [[] for i in range(num_procs)]
    proc_map = {}
    done = False
    # walk the grid in sorted-dimension order; while a deficit remains,
    # merge pairs of adjacent regions (k, k+1) onto the same proc
    for i in range(int(s[ss[0]])):
        for j in range(int(s[ss[1]])):
            for k in range(int(s[ss[2]])):
                if done:
                    done = False
                    continue
                proc_map[tuple(numpy.array((i,j,k),dtype='int')[rss])] = proc
                proc += 1
                if deficit > 0 and k==0:
                    deficit -= 1
                    proc_map[tuple(numpy.array((i,j,k+1),dtype='int')[rss])] = proc-1
                    done = True
    # allocate block_np to procs
    proc_num_particles = [0 for i in range(num_procs)]
    for i,block_id in enumerate(block_np):
        index = (int((block_id.x-lmin)//ld), int((block_id.y-bmin)//bd),
                 int((block_id.z-hmin)//hd))
        proc_blocks[proc_map[index]].append(block_id)
        proc_num_particles[proc_map[index]] += block_np[block_id]
    # return the distribution if procs with zero blocks are permitted
    if allow_zero:
        return proc_blocks, proc_num_particles
    # add block_np to empty procs
    proc_particles_s = numpy.argsort(proc_num_particles)
    empty_procs = [proc for proc,np in enumerate(proc_num_particles) if np==0]
    # i walks the procs from most loaded downwards
    i = num_procs - 1
    while len(empty_procs) > 0:
        # how many empty procs this donor can feed
        nparts = int(min(numpy.ceil(
            proc_num_particles[proc_particles_s[i]]/float(np_per_proc)),
            len(empty_procs)))
        blocks = proc_blocks[proc_particles_s[i]]
        nblocks = int((len(blocks)/float(nparts+1)))
        proc_blocks[proc_particles_s[i]] = []
        # sort by hash for a deterministic (if arbitrary) split order
        blocks_sorted = sorted(blocks, key=hash)
        for j in range(nparts):
            blocks2send = blocks_sorted[j*nblocks:(j+1)*nblocks]
            proc_blocks[empty_procs[j]][:] = blocks2send
            for bid in blocks2send:
                proc_num_particles[empty_procs[j]] += block_np[bid]
                proc_num_particles[proc_particles_s[i]] -= block_np[bid]
            # donor keeps whatever remains after the j-th slice
            proc_blocks[proc_particles_s[i]][:] = blocks_sorted[(j+1)*nblocks:]
        empty_procs[:nparts] = []
        i -= 1
    return proc_blocks, proc_num_particles
###########################################################################
@classmethod
def get_block_proc(self, proc_blocks):
    """ Invert a proc -> [block ids] list into a bid -> pid dict. """
    mapping = {}
    for owner, bids in enumerate(proc_blocks):
        for bid in bids:
            mapping[bid] = owner
    return mapping
@classmethod
def get_load_imbalance(self, particles_per_proc):
    """return the imbalance in the load distribution = (max-avg)/max"""
    mx = max(particles_per_proc)
    avg = float(sum(particles_per_proc)) / len(particles_per_proc)
    return (mx - avg) / mx
@classmethod
def get_quality(self, block_nbr_proc, block_proc, num_procs, ndim):
    """ Return (blocks_nbr, blocks_nbr_proc, procs_nbr) quality measures
    of a distribution: normalized counts of remote block neighbors, of
    distinct remote procs per block, and average distinct neighbor procs
    per proc (lower is better for all three).
    """
    num_blocks = len(block_nbr_proc)
    blocks_nbr = blocks_nbr_proc = procs_nbr = 0
    max_nbrs = (3**ndim-1)
    proc_nbrs = [set() for i in range(num_procs)]
    for bid,proc_np in block_nbr_proc.iteritems():
        pid = block_proc[bid]
        # neighbors not empty (-1) and not on the same proc
        blocks_nbr += 26 - proc_np.get(-1, 0) - proc_np.get(pid, 0)
        blocks_nbr_proc += len(proc_np) - 1 - (-1 in proc_np)
        proc_nbrs[pid].update(proc_np)
    for pid, proc_nbrs_data in enumerate(proc_nbrs):
        # BUGFIX: use discard() instead of remove() -- a proc may have no
        # empty (-1) neighbors at all, or an unused pid may not even
        # appear in its own set, and remove() would raise KeyError.
        proc_nbrs_data.discard(-1)
        proc_nbrs_data.discard(pid)
    #print proc_nbrs
    # normalize by the expected boundary-block count
    fac = num_blocks**((ndim-1.0)/ndim) * max_nbrs
    blocks_nbr = blocks_nbr / fac
    blocks_nbr_proc = blocks_nbr_proc / fac
    procs_nbr = sum([len(i) for i in proc_nbrs])/float(num_procs)
    return blocks_nbr, blocks_nbr_proc, procs_nbr
@classmethod
def get_metric(self, block_proc, particles_per_proc, ndim=None):
    """ Return a performance metric for the current load distribution:
    a tuple (imbalance,) + the three quality measures from
    `get_quality` (see that method for their meaning).
    """
    if ndim is None:
        # FIXME: detect the dimension of the problem
        ndim = 2
    imbalance = self.get_load_imbalance(particles_per_proc)
    num_procs = len(particles_per_proc)
    quality = self.get_quality(self.construct_nbr_block_info(block_proc),
                               block_proc, num_procs, ndim)
    return (imbalance,) + quality
@classmethod
def plot(self, proc_blocks, show=True, save_filename=None):
    """ Plot each block as a cube colored by its owning proc (requires
    mayavi).  Optionally saves the figure to `save_filename` and shows
    the interactive window when `show` is True.
    """
    # BUGFIX: was a bare `except:` (which also swallowed KeyboardInterrupt
    # etc.); catch ImportError only, and try the modern package name too.
    try:
        from enthought.mayavi import mlab
    except ImportError:
        try:
            from mayavi import mlab  # newer mayavi releases drop 'enthought.'
        except ImportError:
            logger.critical('LoadBalancer.plot(): need mayavi to plot')
            return
    # assign a flat index to every block
    block_idx = {}
    #print [len(i) for i in proc_blocks]
    i = 0
    for procno, procblocks in enumerate(proc_blocks):
        for block_id in procblocks:
            block_idx[block_id] = i
            i += 1
    num_blocks = i
    # coordinates and proc number per block for the scatter plot
    x = [0] * num_blocks
    y = [0] * num_blocks
    z = [0] * num_blocks
    p = [0] * num_blocks
    i = 0
    for procno, procblocks in enumerate(proc_blocks):
        for block_id in procblocks:
            x[block_idx[block_id]] = block_id.x
            y[block_idx[block_id]] = block_id.y
            z[block_idx[block_id]] = block_id.z
            p[block_idx[block_id]] = procno
            i += 1
    figure = mlab.figure(0, size=(1200,900))
    plot = mlab.points3d(x, y, z, p, mode='cube', colormap='jet',
                         scale_mode='none', scale_factor=0.8, figure=figure)
    engine = mlab.get_engine()
    scene = engine.scenes[0]
    scene.scene.parallel_projection = True
    #scene.scene.camera.view_up = [0.0, 1.0, 0.0]
    mlab.view(0,0)
    if save_filename:
        mlab.savefig(save_filename, figure=figure)
    if show:
        mlab.show()
@classmethod
def distribute_particle_arrays(cls, particle_arrays, num_procs, block_size,
                               max_iter=100, distr_func='single', **args):
    """Convenience function to distribute given particles into procs.

    Uses the serial load-balancing iteration of the LoadBalancer class.
    Balancing methods can be changed by passing the same `args` as to
    the load_balance_func_serial method.  Returns a list (one entry per
    proc) of lists of particle arrays.
    """
    lb = get_load_balancer_class()()
    lb.pid = 0
    lb.num_procs = num_procs
    # BUGFIX: the attribute was misspelled `lb_max_iteration`; the
    # balancing loop (load_balance_func_serial) reads `lb_max_iterations`.
    lb.lb_max_iterations = max_iter
    # template (empty) properties + constants for rebuilding the arrays
    z = numpy.empty(0)
    empty_props = []
    constants = []
    for pa in particle_arrays:
        d = {}
        for prop in pa.properties:
            d[prop] = z
        empty_props.append(d)
        constants.append(pa.constants)
    redistr_func = getattr(lb, 'load_redistr_'+distr_func)
    lb.load_difference = [0] * lb.num_procs
    # set cell size same as block size and operate on cells
    cm = CellManager(particle_arrays, block_size, block_size)
    lb.particles_per_proc = [0] * lb.num_procs
    block_np = {}
    for bid, cell in cm.cells_dict.iteritems():
        block_np[bid] = cell.get_number_of_particles()
    # everything starts on proc 0
    lb.proc_block_np = [{} for i in range(num_procs)]
    lb.proc_block_np[0] = block_np
    for i, c in enumerate(lb.proc_block_np):
        for cnp in c.values():
            lb.particles_per_proc[i] += cnp
    old_distr = {}
    for proc_no, blocks in enumerate(lb.proc_block_np):
        for bid in blocks:
            old_distr[bid] = proc_no
    lb.old_distr = old_distr
    lb.block_proc = {}
    lb.block_proc.update(old_distr)
    lb.block_nbr_proc = lb.construct_nbr_block_info(lb.block_proc)
    # iterate the serial balancer until done or max_iter reached
    lb.balancing_done = False
    current_balance_iteration = 0
    while lb.balancing_done == False and current_balance_iteration < max_iter:
        lb.load_balance_serial_iter(redistr_func, **args)
        current_balance_iteration += 1
    # rebuild one set of particle arrays per proc from the final block_proc
    na = len(cm.arrays_to_bin)
    particle_arrays_per_proc = [[get_particle_array(**empty_props[j]) for j in range(na)] for
                                i in range(num_procs)]
    cells_dict = cm.cells_dict
    a2b = cm.arrays_to_bin
    for bid, proc in lb.block_proc.iteritems():
        cell = cells_dict[bid]
        pid_list = []
        cell.get_particle_ids(pid_list)
        for i in range(na):
            arr = particle_arrays_per_proc[proc][i]
            arr.constants.update(constants[i])
            arr.append_parray(a2b[i].extract_particles(pid_list[i]))
            arr.set_name(a2b[i].name)
            arr.set_particle_type(a2b[i].particle_type)
    return particle_arrays_per_proc
@classmethod
def distribute_particles(cls, particle_array, num_procs, block_size,
                         max_iter=100, distr_func='auto', **args):
    """Same as distribute_particle_arrays but for a single particle
    array (or a list of them); when given a single array the per-proc
    result is unwrapped accordingly.
    """
    single = isinstance(particle_array, (ParticleArray,))
    if single:
        arrays = [particle_array]
    else:
        # assume particle_array is a list of particle arrays
        arrays = particle_array
    result = cls.distribute_particle_arrays(arrays, num_procs, block_size,
                                            max_iter, distr_func, **args)
    if single:
        result = [per_proc[0] for per_proc in result]
    return result
def get_load_balancer_class():
    """ Return the load-balancing class at the bottom of the
    implementation hierarchy, so that various types of load balancing
    methods can be used.  Preference order: METIS > modified-kmeans >
    space-filling-curve > the base LoadBalancer in this module.

    BUGFIX: the original bound each import to the local name
    `LoadBalancer`; since an in-function import is a binding, that made
    `LoadBalancer` function-local everywhere, and when none of the three
    optional modules was importable the final `return LoadBalancer`
    raised UnboundLocalError instead of returning the base class.  Using
    a distinct local name keeps the global base class reachable.
    """
    try:
        from load_balancer_metis import LoadBalancerMetis as lb_class
    except ImportError:
        try:
            from load_balancer_mkmeans import LoadBalancerMKMeans as lb_class
        except ImportError:
            try:
                from load_balancer_sfc import LoadBalancerSFC as lb_class
            except ImportError:
                # fall back to the base class defined in this module
                lb_class = LoadBalancer
    return lb_class
| [
[
8,
0,
0.001,
0.0013,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.0047,
0.0007,
0,
0.66,
0.1111,
715,
0,
1,
0,
0,
715,
0,
0
],
[
14,
0,
0.0053,
0.0007,
0,
0.66... | [
"\"\"\" Contains class to perform load balancing.\n\"\"\"",
"import logging",
"logger = logging.getLogger()",
"import numpy",
"from pysph.base.particle_array import ParticleArray, get_particle_array",
"from pysph.base.cell import CellManager, py_construct_immediate_neighbor_list",
"TAG_LB_PARTICLE_REQUE... |
""" Tests for the parallel cell manager """
import nose.plugins.skip as skip
raise skip.SkipTest("Dont run this test via nose")
from pysph.parallel.simple_block_manager import SimpleBlockManager
from pysph.base.particles import Particles
from pysph.base.particle_array import get_particle_array
from pysph.base.point import IntPoint
import numpy
import pylab
import time
# mpi imports
from mpi4py import MPI
comm = MPI.COMM_WORLD
num_procs = comm.Get_size()
rank = pid = comm.Get_rank()
def draw_cell(cell, color="b"):
    """ Draw the square outline of `cell` with pylab.

    NOTE(review): uses `base.Point()`, but this module does not import
    `base` (presumably `pysph.base.api`) -- calling this as written
    would raise NameError; confirm the intended import.
    """
    centroid = base.Point()
    cell.get_centroid(centroid)
    half_size = 0.5 * cell.cell_size
    # corners counter-clockwise from the lower-left
    x1, y1 = centroid.x - half_size, centroid.y - half_size
    x2, y2 = x1 + cell.cell_size, y1
    x3, y3 = x2, y1 + cell.cell_size
    x4, y4 = x1, y3
    pylab.plot([x1,x2,x3,x4,x1], [y1, y2, y3, y4,y1], color)
def draw_block(origin, block_size, block_id, color="r"):
    """ Draw the square outline of block `block_id` (integer grid id)
    with pylab, given the domain `origin` and the `block_size`.

    BUGFIX: the centre was computed from the global `proc_map.block_size`
    (undefined in this module, so the call raised NameError) instead of
    the `block_size` parameter; also removed the unused x, y lists.
    """
    half_size = 0.5 * block_size
    # block centre from its integer id
    xc = origin.x + ((block_id.x + 0.5) * block_size)
    yc = origin.y + ((block_id.y + 0.5) * block_size)
    # corners counter-clockwise from the lower-left
    x1, y1 = xc - half_size, yc - half_size
    x2, y2 = x1 + block_size, y1
    x3, y3 = x2, y2 + block_size
    x4, y4 = x1, y3
    pylab.plot([x1,x2,x3,x4,x1], [y1, y2, y3, y4,y1], color)
def draw_particles(cell, color="y"):
    """ Scatter-plot all particles binned in `cell` with pylab.

    (Removed the unused local `index_array` from the loop body.)
    """
    arrays = cell.arrays_to_bin
    num_arrays = len(arrays)
    index_lists = []
    cell.get_particle_ids(index_lists)
    x, y = [], []
    for i in range(num_arrays):
        array = arrays[i]
        indices = index_lists[i].get_npy_array()
        xarray, yarray = array.get('x','y')
        for j in indices:
            x.append(xarray[j])
            y.append(yarray[j])
    pylab.plot(x,y,color+"o")
def get_sorted_indices(cell):
index_lists = []
cell.get_particle_ids(index_lists)
index_array = index_lists[0].get_npy_array()
index_array.sort()
print type(index_array)
return index_array
if pid == 0:
x = numpy.array( [0, 0.2, 0.4, 0.6, 0.8] * 5 )
y = numpy.array( [0.0, 0.0, 0.0, 0.0, 0.0,
0.2 ,0.2, 0.2, 0.2, 0.2,
0.4, 0.4, 0.4, 0.4, 0.4,
0.6, 0.6, 0.6, 0.6, 0.6,
0.8, 0.8, 0.8, 0.8, 0.8] )
x += 1e-10
y += 1e-10
h = numpy.ones_like(x) * 0.3/2.0
block_00 = 0, 1, 5, 6
block_10 = 2, 7
block_20 = 3, 4, 8, 9
block_01 = 10, 11
block_11 = 12
block_21 = 13, 14
block_02 = 15, 16, 20, 21
block_12 = 17, 22
block_22 = 18, 19, 23, 24
cids = [block_00, block_10, block_20,
block_01, block_11, block_21,
block_02, block_12, block_22]
if pid == 1:
x = numpy.array( [0.8, 1.0, 1.2, 1.4, 1.6] * 5 )
y = numpy.array( [0.0, 0.0, 0.0, 0.0, 0.0,
0.2, 0.2, 0.2, 0.2, 0.2,
0.4, 0.4, 0.4, 0.4, 0.4,
0.6, 0.6, 0.6, 0.6, 0.6,
0.8, 0.8, 0.8, 0.8, 0.8] )
x += 1e-10
y += 1e-10
h = numpy.ones_like(x) * 0.3/2.0
block_20 = 4, 9
block_30 = 1, 6
block_40 = 2, 3, 7, 8
block_50 = 4, 9
block_21 = 14
block_31 = 11
block_41 = 12, 13
block_51 = 14
block_22 = 15, 20
block_32 = 16, 21
block_42 = 17, 18, 22, 23
block_52 = 19, 24
cids = [block_20, block_30, block_40, block_50,
block_21, block_31, block_41, block_51,
block_22, block_32, block_42, block_52]
pa = get_particle_array(name="test"+str(rank), x=x, y=y, h=h)
particles = Particles(arrays=[pa,])
# create the block manager
pm = pm = SimpleBlockManager(block_scale_factor=2.0)
pm.initialize(particles)
cm = pm.cm
assert ( abs(pm.block_size - 0.3) < 1e-15 )
assert (pm.block_size == cm.cell_size)
cells_dict = cm.cells_dict
pmap = pm.processor_map
assert (len(cells_dict) == len(cids))
# call an update
pm.update()
# test the processor map's local and global cell map
local_cell_map = pmap.local_cell_map
global_cell_map = pmap.global_cell_map
assert (len(local_cell_map) == len(cells_dict))
for cid in local_cell_map:
assert( cid in cells_dict )
assert( list(local_cell_map[cid])[0] == rank )
if rank == 0:
other_cids = comm.recv(source=1)
comm.send(cids, dest=1)
if rank == 1:
comm.send(cids, dest=0)
other_cids = comm.recv(source=0)
conflicting_cells = IntPoint(2,0,0), IntPoint(2,1,0), IntPoint(2,2,0)
# check the conflicting cells
for cid in conflicting_cells:
assert ( cid in global_cell_map )
pids = list(global_cell_map[cid])
pids.sort()
assert ( pids == [0,1] )
# check the cells_to_send_list
cells_to_send = pmap.get_cell_list_to_send()
if rank == 0:
expected_list = [IntPoint(1,0), IntPoint(1,1), IntPoint(1,2),
IntPoint(2,0), IntPoint(2,1), IntPoint(2,2)]
cell_list = cells_to_send[1]
if rank == 1:
expected_list = [IntPoint(2,0), IntPoint(2,1), IntPoint(2,2),
IntPoint(3,0), IntPoint(3,1), IntPoint(3,2)]
cell_list = cells_to_send[0]
for cid in expected_list:
assert (cid in cell_list)
pa = pm.arrays[0]
print rank, pa.num_real_particles, pa.get_number_of_particles()
| [
[
8,
0,
0.0049,
0.0049,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.0148,
0.0049,
0,
0.66,
0.0256,
548,
0,
1,
0,
0,
548,
0,
0
],
[
1,
0,
0.0296,
0.0049,
0,
0.66... | [
"\"\"\" Tests for the parallel cell manager \"\"\"",
"import nose.plugins.skip as skip",
"from pysph.parallel.simple_block_manager import SimpleBlockManager",
"from pysph.base.particles import Particles",
"from pysph.base.particle_array import get_particle_array",
"from pysph.base.point import IntPoint",
... |
""" Tests for the parallel cell manager """
import nose.plugins.skip as skip
raise skip.SkipTest("Dont run this test via nose")
import pysph.base.api as base
import pysph.parallel.api as parallel
import numpy
import pylab
import time
# mpi imports
from mpi4py import MPI
comm = MPI.COMM_WORLD
num_procs = comm.Get_size()
pid = comm.Get_rank()
def draw_cell(cell, color="b"):
centroid = base.Point()
cell.get_centroid(centroid)
half_size = 0.5 * cell.cell_size
x1, y1 = centroid.x - half_size, centroid.y - half_size
x2, y2 = x1 + cell.cell_size, y1
x3, y3 = x2, y1 + cell.cell_size
x4, y4 = x1, y3
pylab.plot([x1,x2,x3,x4,x1], [y1, y2, y3, y4,y1], color)
def draw_block(origin, block_size, block_id, color="r"):
half_size = 0.5 * block_size
x,y = [], []
xc = origin.x + ((block_id.x + 0.5) * proc_map.block_size)
yc = origin.y + ((block_id.y + 0.5) * proc_map.block_size)
x1, y1 = xc - half_size, yc - half_size
x2, y2 = x1 + block_size, y1
x3, y3 = x2, y2 + block_size
x4, y4 = x1, y3
pylab.plot([x1,x2,x3,x4,x1], [y1, y2, y3, y4,y1], color)
def draw_particles(cell, color="y"):
arrays = cell.arrays_to_bin
num_arrays = len(arrays)
index_lists = []
cell.get_particle_ids(index_lists)
x, y = [], []
for i in range(num_arrays):
array = arrays[i]
index_array = index_lists[i]
indices = index_lists[i].get_npy_array()
xarray, yarray = array.get('x','y')
for j in indices:
x.append(xarray[j])
y.append(yarray[j])
pylab.plot(x,y,color+"o")
def get_sorted_indices(cell):
index_lists = []
cell.get_particle_ids(index_lists)
index_array = index_lists[0].get_npy_array()
index_array.sort()
print type(index_array)
return index_array
xc = numpy.arange(0,1.0, 0.2)
x, y = numpy.meshgrid(xc,xc)
x = x = x.ravel()
y = y = y.ravel()
h = h = numpy.ones_like(x) * 0.25
dx = dy = 0.2
dx = dx
block_size = 0.5
cell_size = 0.5
block_000_indices = 0,1,2,5,6,7,10,11,12
block_100_indices = 3,4,8,9,13,14
block_010_indices = 15,16,17,20,21,22
block_110_indices = 18,19,23,24
name = "rank" + str(pid)
pa = pa = base.get_particle_array(name="test", x=x, y=y, h=h)
if pid == 1:
pa.x += 1.0
pa.x += 1e-10
if pid == 2:
pa.y += 9
if pid == 3:
pa.x += 9; pa.y += 9
# create the cell manager
cm = cm = parallel.ParallelCellManager(arrays_to_bin=[pa,],
max_radius_scale=2.0,
dimension=2.0,
load_balancing=False,
initialize=False,
min_cell_size=0.5)
# find global min and max
cm.update_global_properties()
# compute block size
cm.compute_block_size(0.5)
# compute cell size
cm.compute_cell_size(0,0)
# setup array indices.
cm.py_rebuild_array_indices()
# setup the cells_dict
cm.py_setup_cells_dict()
# setup information for the processor map.
cm.setup_processor_map()
# build a single cell with all the particles
cm._build_cell()
cells_dict = cm.cells_dict
proc_map = cm.proc_map
# Test the initial setup
if pid == 0:
assert len(cells_dict) == 1, "At this stage only the base cell should exist"
cell = cells_dict.values()[0]
index_lists = []
cell.get_particle_ids(index_lists)
index_array = index_lists[0].get_npy_array()
index_array.sort()
# check the indices
cid = cells_dict.keys()[0]
assert (cid.x, cid.y, cid.z) == (0,0,0)
for i in range(25):
assert index_array[i] == i
# test the block size for the processor map
assert proc_map.block_size == 0.5
print "Checking cells_update"
# check bin_particles
print "Testing bin particles: new_block_cells, remote_block_cells"
new_block_cells, remote_block_cells = cm.bin_particles()
# the local and global proc_map should be empty
assert len(proc_map.local_block_map) == 0
assert len(proc_map.block_map) == 0
# the remote block cells should be empty
assert len(remote_block_cells) == 0
# there should be four new block cells
bid1 = base.IntPoint(0,0,0)
bid2 = base.IntPoint(1,0,0)
bid3 = base.IntPoint(1,1,0)
bid4 = base.IntPoint(0,1,0)
assert new_block_cells.has_key(bid1)
assert new_block_cells.has_key(bid2)
assert new_block_cells.has_key(bid3)
assert new_block_cells.has_key(bid4)
# the cells dict should be empty as well at this point
assert len(cells_dict) == 0
# test the particle copies for the new blocks
print "Testing create_new_particle_copies"
new_block_particles = cm.create_new_particle_copies(new_block_cells,
False)
assert len(new_block_particles) == 4
# check particles in bid 0,0,0
parray_list = new_block_particles.get(bid1)
assert len(parray_list) == 1
parray = parray_list[0]
indices = parray.get("idx")
indices.sort()
assert list(indices) == list(block_000_indices)
# check particles in bid 1,0,0
parray_list = new_block_particles.get(bid2)
assert len(parray_list) == 1
parray = parray_list[0]
indices = parray.get("idx")
indices.sort()
assert list(indices) == list(block_100_indices)
# check particles in bid 1,1,0
parray_list = new_block_particles.get(bid3)
assert len(parray_list) == 1
parray = parray_list[0]
indices = parray.get("idx")
indices.sort()
assert list(indices) == list(block_110_indices)
# check particles in bid 0,1,0
parray_list = new_block_particles.get(bid4)
assert len(parray_list) == 1
parray = parray_list[0]
indices = parray.get("idx")
indices.sort()
assert list(indices) == list(block_010_indices)
print "Testing assign_new_blocks: proc_map"
# assign the new blocks to the processor map
cm.assign_new_blocks(new_block_cells)
# check the processor map
assert len(cm.proc_map.local_block_map) == 4
assert len(cm.proc_map.block_map) == 4
assert cm.proc_map.nbr_procs == [pid]
# compute cell size
cm.compute_cell_size()
assert cm.cell_size == 0.5
# ensure all particles are local (!=0)
pa = cm.arrays_to_bin[0]
local = pa.get("local", only_real_particles=False)
for i in range(pa.get_number_of_particles()):
assert local[i] != 0
print "Testing rebin particles"
# rebin particles
cm.rebin_particles()
# now check the cells_dict
cells_dict = cm.cells_dict
assert len(cells_dict) == 4
# check the particles in the cells
cids = [base.IntPoint(0,0,0), base.IntPoint(1,0,0),
base.IntPoint(1,1,0), base.IntPoint(0,1,0)]
index_map = [block_000_indices, block_100_indices,
block_110_indices, block_010_indices]
for i in range(4):
cid = cids[i]
cell = cells_dict.get(cid)
index_lists = []
cell.get_particle_ids(index_lists)
cell_indices = index_lists[0].get_npy_array()
cell_indices.sort()
assert list(cell_indices) == list(index_map[i])
request_to_start = True
go_on = False
comm.send(obj=request_to_start, dest=1)
print "Requested process 1 to catch up "
go_on = comm.recv(source=1)
if go_on:
print "Picking up from where we left... "
print "Testing glb_update_proc_map"
# update the global processor map
cm.remove_remote_particles()
cm.delete_empty_cells()
cm.proc_map.glb_update_proc_map(cm.cells_dict)
recv_particles = cm.proc_map.resolve_procmap_conflicts({})
cm.add_entering_particles_from_neighbors(recv_particles)
cm.remove_remote_particles()
# check the processor maps
print "Processor 0 Block Maps"
print "Local\n"
for blockid in cm.proc_map.local_block_map:
print blockid
print
print "Global\n"
for blockid in cm.proc_map.block_map:
print blockid
print
print_yours=True
comm.send(obj=print_yours, dest=1)
print "Testing Neighbors 0"
assert cm.proc_map.nbr_procs == [0,1]
# exchange neighbor particles
cm.exchange_neighbor_particles()
print "Testing Exchange Neighbor Particles"
print "Cells Dict For Processor 0 After Exchange\n"
for cid, cell in cells_dict.iteritems():
print cid, "np = ", cell.get_number_of_particles()
print_yours=True
comm.send(obj=print_yours, dest=1)
print "Testing remote particle indices on Processor 0"
parray = cm.arrays_to_bin[0]
np = parray.get_number_of_particles()
nrp = parray.num_real_particles
assert nrp == 25
assert np == 40
local = parray.get("local", only_real_particles=False)
rpi = cm.remote_particle_indices[1][0]
assert rpi[0] == nrp
assert rpi[1] == np
for i in range(np):
if i >= nrp:
assert local[i] == 0
else:
assert local[i] == 1
# test the update of remote particle indices
print "Testing Update Remote Particle Properties on processor 0"
# change the local property say 'p' and 'rho' to -1
pa = cm.arrays_to_bin[0]
p = pa.get('p', only_real_particles=False)
rho = pa.get('rho', only_real_particles=False)
p[:nrp] = -1
rho[:nrp] = -1
for i in range(np):
if i >= nrp:
assert p[i] != -1
assert rho[i] != -1
yours_is_set = comm.recv(source=1)
if yours_is_set:
cm.update_remote_particle_properties([['p','rho']])
p = pa.get('p', only_real_particles=False)
rho = pa.get('rho', only_real_particles=False)
for i in range(np):
if i >= nrp:
assert p[i] == -1
assert rho[i] == -1
#####################################################################
# SECOND ITERATION
#####################################################################
# test the configuration
cids = [base.IntPoint(0,0,0), base.IntPoint(1,0,0), base.IntPoint(1,1,0),
base.IntPoint(0,1,0), base.IntPoint(2,0,0), base.IntPoint(2,1,0)]
pa = cm.arrays_to_bin[0]
for cid in cids:
assert cm.cells_dict.has_key(cid)
if cid in [base.IntPoint(2,0,0), base.IntPoint(2,1,0)]:
cell = cells_dict.get(cid)
index_lists = []
cell.get_particle_ids(index_lists)
parray = pa.extract_particles(index_lists[0])
local = parray.get('local', only_real_particles=False)
for val in local:
assert val == 0
# remove non local particles
cm.remove_remote_particles()
np = pa.get_number_of_particles()
assert np == 25
# move 6 particles in cell/block (1,0,0) to (2,0,0)
x = pa.get('x')
for i in block_100_indices:
x[i] += 0.5
cm.cells_update()
np = pa.get_number_of_particles()
nrp = pa.num_real_particles
assert np == 40
assert nrp == 19
# now move the 4 particles in cell/block (1,1,0) to block/cell (1,2,0)
y = pa.get('y')
cell_110 = cm.cells_dict.get(base.IntPoint(1,1,0))
index_lists = []
cell_110.get_particle_ids(index_lists)
index_array = index_lists[0].get_npy_array()
for i in index_array:
y[i] += 0.5
# now call a cells update
cm.cells_update()
np = pa.get_number_of_particles()
nrp = pa.num_real_particles
assert nrp == 19 + 6
assert np == 19 + 6
if pid == 1:
start = False
start = comm.recv(source=0)
if start:
print "Process 1 starting after request "
assert len(cells_dict) == 1, "only the base cell should exist"
cell = cells_dict.values()[0]
index_lists = []
cell.get_particle_ids(index_lists)
index_array = index_lists[0].get_npy_array()
index_array.sort()
# check the indices
cid = cells_dict.keys()[0]
#assert (cid.x, cid.y, cid.z) == (0,0,0)
for i in range(25):
#assert index_array[i] == i
pass
# test the block size for the processor map
assert proc_map.block_size == 0.5
print "Checking cells_update"
# check bin_particles
print "Testing bin particles: new_block_cells, remote_block_cells"
new_block_cells, remote_block_cells = cm.bin_particles()
# the local and global proc_map should be empty
assert len(proc_map.local_block_map) == 0
assert len(proc_map.block_map) == 0
# the remote block cells should be empty
assert len(remote_block_cells) == 0
# there should be four new block cells
bid1 = base.IntPoint(0,0,0)
bid2 = base.IntPoint(1,0,0)
bid3 = base.IntPoint(1,1,0)
bid4 = base.IntPoint(0,1,0)
#assert new_block_cells.has_key(bid1)
#assert new_block_cells.has_key(bid2)
#assert new_block_cells.has_key(bid3)
#assert new_block_cells.has_key(bid4)
# the cells dict should be empty as well at this point
assert len(cells_dict) == 0
# test the particle copies for the new blocks
print "Testing create_new_particle_copies"
new_block_particles = cm.create_new_particle_copies(new_block_cells,
False)
#assert len(new_block_particles) == 4
# check particles in bid 0,0,0
#parray_list = new_block_particles.get(bid1)
#assert len(parray_list) == 1
#parray = parray_list[0]
#indices = parray.get("idx")
#indices.sort()
#assert list(indices) == list(block_000_indices)
# check particles in bid 1,0,0
#parray_list = new_block_particles.get(bid2)
#assert len(parray_list) == 1
#parray = parray_list[0]
#indices = parray.get("idx")
#indices.sort()
#assert list(indices) == list(block_100_indices)
# check particles in bid 1,1,0
#parray_list = new_block_particles.get(bid3)
#assert len(parray_list) == 1
#parray = parray_list[0]
#indices = parray.get("idx")
#indices.sort()
#assert list(indices) == list(block_110_indices)
# check particles in bid 0,1,0
#parray_list = new_block_particles.get(bid4)
#assert len(parray_list) == 1
#parray = parray_list[0]
#indices = parray.get("idx")
#indices.sort()
#assert list(indices) == list(block_010_indices)
print "Testing assign_new_blocks: proc_map"
# assign the new blocks to the processor map
cm.assign_new_blocks(new_block_cells)
# check the processor map
#assert len(cm.proc_map.local_block_map) == 4
#assert len(cm.proc_map.block_map) == 4
#assert cm.proc_map.nbr_procs == [pid]
# compute cell size
cm.compute_cell_size()
assert cm.cell_size == 0.5
# ensure all particles are local (!=0)
pa = cm.arrays_to_bin[0]
local = pa.get("local")
for i in range(pa.get_number_of_particles()):
assert local[i] != 0
print "Testing rebin particles"
# rebin particles
cm.rebin_particles()
# now check the cells_dict
cells_dict = cm.cells_dict
#assert len(cells_dict) == 4
# check the particles in the cells
cids = [base.IntPoint(0,0,0), base.IntPoint(1,0,0),
base.IntPoint(1,1,0), base.IntPoint(0,1,0)]
index_map = [block_000_indices, block_100_indices,
block_110_indices, block_010_indices]
#for i in range(4):
# cid = cids[i]
# cell = cells_dict.get(cid)
# index_lists = []
# cell.get_particle_ids(index_lists)
# cell_indices = index_lists[0].get_npy_array()
# cell_indices.sort()
#assert list(cell_indices) == list(index_map[i])
print "Requesting process 0 to continue"
comm.send(obj=True, dest=0)
print "Testing glb_update_proc_map"
# update the global processor map
cm.remove_remote_particles()
cm.delete_empty_cells()
cm.proc_map.glb_update_proc_map(cm.cells_dict)
recv_particles = cm.proc_map.resolve_procmap_conflicts({})
cm.add_entering_particles_from_neighbors(recv_particles)
cm.remove_remote_particles()
# check the processor maps
time.sleep(.5)
should_i_print = comm.recv(source=0)
if should_i_print:
print "Processor 1 Block Maps"
print "Local\n"
for blockid in cm.proc_map.local_block_map:
print blockid
print
print "Global\n"
for blockid in cm.proc_map.block_map:
print blockid
print
print "Testing Neighbors 1"
assert cm.proc_map.nbr_procs == [0,1]
# exchange neighbor particles
cm.exchange_neighbor_particles()
print "Testing Exchange Neighbor Particles"
should_i_print_cells_dict = comm.recv(source=0)
if should_i_print_cells_dict:
print "Cells Dict For Processor 1 After Exchange\n"
for cid, cell in cells_dict.iteritems():
print cid, "np = ", cell.get_number_of_particles()
print "Testing remote particle indices on Processor 1"
parray = cm.arrays_to_bin[0]
np = parray.get_number_of_particles()
nrp = parray.num_real_particles
assert nrp == 25
assert np == 35
local = parray.get("local", only_real_particles=False)
rpi = cm.remote_particle_indices[0][0]
assert rpi[0] == nrp
assert rpi[1] == np
for i in range(np):
if i >= nrp:
assert local[i] == 0
else:
assert local[i] == 1
# test the update of remote particle indices
print "Testing Update Remote Particle Properties on processor 1"
# change some local property say 'p' and 'rho' to -1
pa = cm.arrays_to_bin[0]
p = pa.get('p', only_real_particles=False)
rho = pa.get('rho', only_real_particles=False)
p[:nrp] = -1
rho[:nrp] = -1
for i in range(np):
if i >= nrp:
assert p[i] != -1
assert rho[i] != -1
mine_is_set = True
comm.send(obj=mine_is_set, dest=0)
cm.update_remote_particle_properties([['p','rho']])
p = pa.get('p', only_real_particles=False)
rho = pa.get('rho', only_real_particles=False)
for i in range(np):
if i >= nrp:
assert p[i] == -1
assert rho[i] == -1
cm.remove_remote_particles()
np = pa.get_number_of_particles()
assert np == 25
cm.cells_update()
np = pa.get_number_of_particles()
nrp = pa.num_real_particles
assert np == 35
assert nrp == 31
# now move particles in cell (2,1,0) to cell (1, 2, 0)
x, y = pa.get('x', 'y')
cell_210 = cm.cells_dict.get(base.IntPoint(2,1,0))
index_lists = []
cell_210.get_particle_ids(index_lists)
index_array = index_lists[0].get_npy_array()
for i in index_array:
y[i] += 0.5
x[i] -= 0.5
# now call a cells update
cm.cells_update()
np = pa.get_number_of_particles()
nrp = pa.num_real_particles
assert nrp == 31 - 6
assert np == nrp
| [
[
1,
0,
0.0147,
0.0147,
0,
0.66,
0,
548,
0,
1,
0,
0,
548,
0,
0
],
[
1,
0,
0.0294,
0.0147,
0,
0.66,
0.1,
212,
0,
1,
0,
0,
212,
0,
0
],
[
1,
0,
0.0441,
0.0147,
0,
0.6... | [
"import nose.plugins.skip as skip",
"import pysph.base.api as base",
"import pysph.parallel.api as parallel",
"import numpy",
"import pylab",
"import time",
"from mpi4py import MPI",
"def draw_cell(cell, color=\"b\"):\n centroid = base.Point()\n cell.get_centroid(centroid)\n \n half_size =... |
""" Tests for the parallel cell manager """
import pysph.base.api as base
import pysph.parallel.api as parallel
import numpy
import time
import pdb
# mpi imports
from mpi4py import MPI
comm = MPI.COMM_WORLD
num_procs = comm.Get_size()
pid = comm.Get_rank()
import logging
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
logger.addHandler(logging.StreamHandler())
xc = numpy.arange(0,1.0, 0.2)
x, y = numpy.meshgrid(xc,xc)
x = x = x.ravel()
y = y = y.ravel()
h = h = numpy.ones_like(x) * 0.25
dx = dy = 0.2
dx = dx
block_size = 0.5
cell_size = 0.5
block_000_indices = 0,1,2,5,6,7,10,11,12
block_100_indices = 3,4,8,9,13,14
block_010_indices = 15,16,17,20,21,22
block_110_indices = 18,19,23,24
name = "rank" + str(pid)
pa = base.get_particle_array(name="test", x=x, y=y, h=h)
pa.x += 1.0*pid
pa.x += 1e-10
# create the cell manager
cm = cm = parallel.ParallelCellManager(arrays_to_bin=[pa,],
max_radius_scale=2.0,
dimension=2.0,
load_balancing=False,
initialize=False,
min_cell_size=0.5)
# find global min and max
cm.update_global_properties()
# compute block size
cm.compute_block_size(0.5)
# compute cell size
cm.compute_cell_size(0,0)
# setup array indices.
cm.py_rebuild_array_indices()
# setup the cells_dict
cm.py_setup_cells_dict()
# setup information for the processor map.
cm.setup_processor_map()
# build a single cell with all the particles
cm._build_cell()
cells_dict = cm.cells_dict
proc_map = cm.proc_map
# Test the initial setup
assert len(cells_dict) == 1, "At this stage only the base cell should exist"
cell = cells_dict.values()[0]
index_lists = []
cell.get_particle_ids(index_lists)
index_array = index_lists[0].get_npy_array()
index_array.sort()
# check the indices
#cid = cells_dict.keys()[0]
#assert (cid.x, cid.y, cid.z) == [(0,0,0),(2,0,0)][pid]
for i in range(25):
assert index_array[i] == i
# test the block size for the processor map
assert proc_map.block_size == 0.5
print "Checking cells_update"
# check bin_particles
print "Testing bin particles: new_block_cells, remote_block_cells"
new_block_cells, remote_block_cells = cm.bin_particles()
# the local and global proc_map should be empty
assert len(proc_map.local_block_map) == 0
assert len(proc_map.block_map) == 0
# the remote block cells should be empty
assert len(remote_block_cells) == 0
# there should be four new block cells
bid1 = base.IntPoint(0+2*pid,0,0)
bid2 = base.IntPoint(1+2*pid,0,0)
bid3 = base.IntPoint(1+2*pid,1,0)
bid4 = base.IntPoint(0+2*pid,1,0)
print new_block_cells
assert new_block_cells.has_key(bid1)
assert new_block_cells.has_key(bid2)
assert new_block_cells.has_key(bid3)
assert new_block_cells.has_key(bid4)
# the cells dict should be empty as well at this point
assert len(cells_dict) == 0
# test the particle copies for the new blocks
print "Testing create_new_particle_copies"
new_block_particles = cm.create_new_particle_copies(new_block_cells,
False)
assert len(new_block_particles) == 4
# check particles in bid 0,0,0
parray_list = new_block_particles.get(bid1)
assert len(parray_list) == 1
parray = parray_list[0]
indices = parray.get("idx")
indices.sort()
assert list(indices) == list(block_000_indices)
# check particles in bid 1,0,0
parray_list = new_block_particles.get(bid2)
assert len(parray_list) == 1
parray = parray_list[0]
indices = parray.get("idx")
indices.sort()
assert list(indices) == list(block_100_indices)
# check particles in bid 1,1,0
parray_list = new_block_particles.get(bid3)
assert len(parray_list) == 1
parray = parray_list[0]
indices = parray.get("idx")
indices.sort()
assert list(indices) == list(block_110_indices)
# check particles in bid 0,1,0
parray_list = new_block_particles.get(bid4)
assert len(parray_list) == 1
parray = parray_list[0]
indices = parray.get("idx")
indices.sort()
assert list(indices) == list(block_010_indices)
print "Testing assign_new_blocks: proc_map"
# assign the new blocks to the processor map
cm.assign_new_blocks(new_block_cells)
# check the processor map
assert len(cm.proc_map.local_block_map) == 4
assert len(cm.proc_map.block_map) == 4
assert cm.proc_map.nbr_procs == [pid]
# compute cell size
cm.compute_cell_size()
assert cm.cell_size == 0.5
# ensure all particles are local (!=0)
pa = cm.arrays_to_bin[0]
local = pa.get("local", only_real_particles=False)
for i in range(pa.get_number_of_particles()):
assert local[i] != 0
print "Testing rebin particles"
# rebin particles
cm.rebin_particles()
# now check the cells_dict
cells_dict = cm.cells_dict
assert len(cells_dict) == 4
# check the particles in the cells
cids = [base.IntPoint(0+2*pid,0,0), base.IntPoint(1+2*pid,0,0),
base.IntPoint(1+2*pid,1,0), base.IntPoint(0+2*pid,1,0)]
index_map = [block_000_indices, block_100_indices,
block_110_indices, block_010_indices]
for i in range(4):
cid = cids[i]
cell = cells_dict.get(cid)
index_lists = []
cell.get_particle_ids(index_lists)
cell_indices = index_lists[0].get_npy_array()
cell_indices.sort()
assert list(cell_indices) == list(index_map[i])
print "Testing glb_update_proc_map"
# update the global processor map
cm.remove_remote_particles()
cm.delete_empty_cells()
cm.proc_map.glb_update_proc_map(cm.cells_dict)
recv_particles = cm.proc_map.resolve_procmap_conflicts({})
cm.add_entering_particles_from_neighbors(recv_particles)
cm.remove_remote_particles()
# check the processor maps
print "Processor", pid, "Block Maps"
print "Local\n"
for blockid in cm.proc_map.local_block_map:
print blockid
print
print "Global\n"
for blockid in cm.proc_map.block_map:
print blockid, cm.proc_map.block_map[blockid]
print
print "Testing Neighbors", pid
assert cm.proc_map.nbr_procs == [i for i in (pid-1, pid, pid+1) if i>=0 and i<num_procs]
# exchange neighbor particles
cm.exchange_neighbor_particles()
print "Testing Exchange Neighbor Particles"
print "Cells Dict For Processor", pid, "After Exchange\n"
for cid, cell in cells_dict.iteritems():
print cid, "np = ", cell.get_number_of_particles()
#print_yours=True
#comm.send(obj=print_yours, dest=1)
print "Testing remote particle indices on Processor", pid
parray = cm.arrays_to_bin[0]
np = parray.get_number_of_particles()
nrp = parray.num_real_particles
assert nrp == 25, "nrp=%r"%nrp
assert np == nrp + 15*(pid<num_procs-1)+10*(pid>0), "np=%r != %r"%(np,
nrp + 15*(pid<num_procs)+10*(pid>0))
local = parray.get("local", only_real_particles=False)
for i in proc_map.nbr_procs:
if i == pid: continue
rpi = cm.remote_particle_indices[i][0]
print pid, cm.remote_particle_indices
r = nrp + 10*(pid<i and pid>0)
assert rpi[0] == r, "%r,%r, rpi[0]=%r, r=%r"%(i,pid, rpi[0], r)
r = r + 10 + 5*(pid<i)
assert rpi[1] == r, "%r,%r, rpi[1]=%r != %r"%(i,pid, rpi[1], r)
for i in range(np):
if i >= nrp:
assert local[i] == 0
else:
assert local[i] == 1
# test the update of remote particle indices
print "Testing Update Remote Particle Properties on processor", pid
# change the local property say 'p' and 'rho' to -1
pa = cm.arrays_to_bin[0]
p = pa.get('p', only_real_particles=False)
rho = pa.get('rho', only_real_particles=False)
p[:nrp] = -1
rho[:nrp] = -1
for i in range(np):
if i >= nrp:
assert p[i] != -1
assert rho[i] != -1
#yours_is_set = comm.recv(source=1)
#if yours_is_set:
cm.update_remote_particle_properties([['p','rho']])
p = pa.get('p', only_real_particles=False)
rho = pa.get('rho', only_real_particles=False)
for i in range(np):
if i >= nrp:
assert p[i] == -1
assert rho[i] == -1
#####################################################################
# SECOND ITERATION
#####################################################################
# test the configuration
cids = [base.IntPoint(0+pid*2,0,0), base.IntPoint(1+pid*2,0,0),
base.IntPoint(1+pid*2,1,0), base.IntPoint(0+pid*2,1,0)]
for nbr in proc_map.nbr_procs:
if nbr == pid: continue
if nbr < pid:
cids.append(base.IntPoint(-1+pid*2,0,0))
cids.append(base.IntPoint(-1+pid*2,1,0))
elif nbr > pid:
cids.append(base.IntPoint(2+pid*2,0,0))
cids.append(base.IntPoint(2+pid*2,1,0))
pa = cm.arrays_to_bin[0]
for cid in cids:
assert cm.cells_dict.has_key(cid), "%r %r"%(pid, cid)
if cid in [base.IntPoint(2-pid,0,0), base.IntPoint(2-pid,1,0)]:
cell = cells_dict.get(cid)
index_lists = []
cell.get_particle_ids(index_lists)
parray = pa.extract_particles(index_lists[0])
local = parray.get('local', only_real_particles=False)
for val in local:
assert val == 0
# remove non local particles
cm.remove_remote_particles()
cm.delete_empty_cells()
np = pa.get_number_of_particles()
assert np == 25
# move 6 particles in cell/block (1,0,0) to (2,0,0) in pid=0
x = pa.get('x')
if pid == 0:
for i in block_100_indices:
x[i] += 0.5
print "Local\n"
for blockid in cm.proc_map.local_block_map:
print blockid
print
print "Global\n"
for blockid in cm.proc_map.block_map:
print blockid, cm.proc_map.block_map[blockid]
print
cm.cells_update()
npr = sum([i.num_real_particles for i in cm.arrays_to_bin])
nprt = comm.bcast(comm.reduce(npr))
assert nprt==25*num_procs
print "Local\n"
for blockid in cm.proc_map.local_block_map:
print blockid
print
print "Global\n"
for blockid in cm.proc_map.block_map:
print blockid, cm.proc_map.block_map[blockid]
print
print cm.cells_dict.values()
np = pa.get_number_of_particles()
nrp = pa.num_real_particles
print pid, np, nrp
if num_procs == 2:
assert np == [40,35][pid], '%r, %r'%(pid, np)
assert nrp == [19,31][pid], '%r, %r'%(pid, np)
# now move the 4 particles in cell/block (1,1,0) to block/cell (1,2,0) in pid=0
# and particles in cell (2,1,0) to cell (1, 2, 0) in pid=1
x, y = pa.get('x', 'y')
if num_procs == 2:
cell = cm.cells_dict.get(base.IntPoint([1,2][pid],1,0))
index_lists = []
cell.get_particle_ids(index_lists)
index_array = index_lists[0].get_npy_array()
print index_array
for i in index_array:
y[i] += 0.5
if pid == 1:
x[i] -= 0.5
# now call a cells update
cm.cells_update()
print "Local\n"
for blockid in cm.proc_map.local_block_map:
print blockid
print
print "Global\n"
for blockid in cm.proc_map.block_map:
print blockid, cm.proc_map.block_map[blockid]
print
print cm.cells_dict.values()
npr = sum([i.num_real_particles for i in cm.arrays_to_bin])
nprt = comm.bcast(comm.reduce(npr))
assert nprt==25*num_procs
np = pa.get_number_of_particles()
nrp = pa.num_real_particles
print pid, np
if num_procs == 2:
assert nrp == [19 + 6, 31 - 6][pid]
#assert np == nrp + 10
#assert np == [np, 41][pid]
assert np == nrp, "%r, %r!=%r"%(pid,np, nrp)
cells_nps = {base.IntPoint(0,0,0):9,
base.IntPoint(2,0,0):15,
base.IntPoint(3,0,0):6,
base.IntPoint(0,1,0):6,
base.IntPoint(3,1,0):4,
base.IntPoint(1,2,0):10,
}
print cm.proc_map.block_map
for cid, cell in cm.cells_dict.iteritems():
print cid, cell, cell.get_number_of_particles()
assert cell.get_number_of_particles() == cells_nps[cid], '%r'%(cell)
npr = sum([i.num_real_particles for i in cm.arrays_to_bin])
assert comm.bcast(comm.reduce(npr)) == 50
assert nrp == [19 + 6, 31 - 6][pid]
cm.cells_update()
npr = sum([i.num_real_particles for i in cm.arrays_to_bin])
assert comm.bcast(comm.reduce(npr)) == 50
assert nrp == [19 + 6, 31 - 6][pid]
#print cm.proc_map.nbr_procs
| [
[
1,
0,
0.1111,
0.1111,
0,
0.66,
0,
212,
0,
1,
0,
0,
212,
0,
0
],
[
1,
0,
0.2222,
0.1111,
0,
0.66,
0.1667,
496,
0,
1,
0,
0,
496,
0,
0
],
[
1,
0,
0.4444,
0.1111,
0,
... | [
"import pysph.base.api as base",
"import pysph.parallel.api as parallel",
"import numpy",
"import time",
"import pdb",
"from mpi4py import MPI",
"import logging"
] |
"""
Simple script to check if the load balancing works on 2-d data.
"""
try:
import mpi4py.MPI as mpi
except ImportError:
import nose.plugins.skip as skip
reason = "mpi4py not installed"
raise skip.SkipTest(reason)
# mpi imports
from mpi4py import MPI
comm = MPI.COMM_WORLD
num_procs = comm.Get_size()
rank = comm.Get_rank()
# logging setup
# logging setup
import logging
logger = logging.getLogger()
log_file_name = '/tmp/log_pysph_'+str(rank)
logging.basicConfig(level=logging.DEBUG, filename=log_file_name,
filemode='w')
logger.addHandler(logging.StreamHandler())
# local imports
from pysph.base.particle_array import ParticleArray
from pysph.parallel.parallel_cell import ParallelCellManager
from pysph.solver.basic_generators import RectangleGenerator
from pysph.base.cell import INT_INF
from pysph.base.point import *
pcm = ParallelCellManager(initialize=False, dimension=2)
parray = ParticleArray(name='parray')
if rank == 0:
lg = RectangleGenerator(particle_spacing_x1=0.1,
particle_spacing_x2=0.1)
x, y, z = lg.get_coords()
parray.add_property({'name':'x', 'data':x})
parray.add_property({'name':'y', 'data':y})
parray.add_property({'name':'z', 'data':z})
parray.add_property({'name':'h'})
parray.align_particles()
parray.h[:] = 0.1
else:
parray.add_property({'name':'x'})
parray.add_property({'name':'y'})
parray.add_property({'name':'z'})
parray.add_property({'name':'h'})
pcm.add_array_to_bin(parray)
pcm.initialize()
| [
[
8,
0,
0.0351,
0.0526,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
7,
0,
0.1316,
0.1053,
0,
0.66,
0.05,
0,
0,
1,
0,
0,
0,
0,
1
],
[
1,
1,
0.1053,
0.0175,
1,
0.02,
... | [
"\"\"\"\nSimple script to check if the load balancing works on 2-d data.\n\"\"\"",
"try:\n import mpi4py.MPI as mpi\nexcept ImportError:\n import nose.plugins.skip as skip\n reason = \"mpi4py not installed\"\n raise skip.SkipTest(reason)",
" import mpi4py.MPI as mpi",
" import nose.plugins.s... |
""" Test the share_data function for various cases
cases to run are chosen based on the size of the MPI.COMM_wORLD
case 1: for 5 processes
Processors arrangement:
4
0 1 2 3
Nbr lists:
0: 1,4
1: 0,2,4
2: 1,3,4
3: 2
4: 0,1,2
case 2: for 2 processes
both neighbors of each other
case 3,4,5: n processes (n>1 for case 5)
all neighbors of each other
"""
try:
import mpi4py.MPI as mpi
except ImportError:
import nose.plugins.skip as skip
reason = "mpi4py not installed"
raise skip.SkipTest(reason)
# mpi imports
from mpi4py import MPI
comm = MPI.COMM_WORLD
num_procs = comm.Get_size()
rank = comm.Get_rank()
from pysph.parallel.parallel_cell import share_data
def case1(multi=True, to_self=False):
""" 5 processes """
if num_procs != 5: return
nbr_lists = [[1,4],
[0,2,4],
[1,3,4],
[2],
[0,1,2],
]
nbr_list = nbr_lists[rank]
if to_self: nbr_list.append(rank)
proc_data = {}
for nbr in nbr_list:
proc_data[nbr] = (rank, nbr)
recv_data = share_data(rank, nbr_list, proc_data, comm, multi=multi)
assert len(recv_data) == len(nbr_list)
if multi:
for pid,data in recv_data.iteritems():
assert data == (pid, rank)
else:
for pid,data in recv_data.iteritems():
for pid2,data2 in data.iteritems():
assert data2 == (pid, pid2)
def case2():
""" 2 processes """
if num_procs != 2: return
nbr_list = [1-rank]
proc_data = {1-rank:(rank, 1-rank)}
recv_data = share_data(rank, nbr_list, proc_data, comm, multi=True)
print rank, recv_data
def case3(multi=True, to_self=False):
""" all-to-all communication """
nbr_list = range(num_procs)
if not to_self: nbr_list.remove(rank)
proc_data = {}
for nbr in nbr_list:
proc_data[nbr] = (rank, nbr)
recv_data = share_data(rank, nbr_list, proc_data, comm, multi=multi)
assert len(recv_data) == len(nbr_list)
if multi:
for pid,data in recv_data.iteritems():
assert data == (pid, rank)
else:
print rank, recv_data
for pid,data in recv_data.iteritems():
for pid2,data2 in data.iteritems():
assert data2 == (pid, pid2)
def case4(multi=True, to_self=False):
""" all-to-all oneway communication """
nbr_list = range(num_procs)
if not to_self: nbr_list.remove(rank)
proc_data = {}
for nbr in nbr_list:
proc_data[nbr] = (rank, nbr)
recv_data = share_data(rank, nbr_list, proc_data, comm, multi=multi)
assert len(recv_data) == len(nbr_list)
if multi:
for pid,data in recv_data.iteritems():
assert data == (pid, rank)
else:
print rank, recv_data
for pid,data in recv_data.iteritems():
for pid2,data2 in data.iteritems():
assert data2 == (pid, pid2)
def case5(multi=True, to_self=False):
""" oneway communication to next two consecutive procs """
send_procs = [(rank+1)%num_procs, (rank+2)%num_procs]
recv_procs = [(rank-1)%num_procs, (rank-2)%num_procs]
proc_data = {}
for nbr in send_procs:
proc_data[nbr] = (rank, nbr)
print rank, send_procs, recv_procs, proc_data
recv_data = share_data(rank, send_procs, proc_data, comm, multi=multi,
recv_procs=recv_procs)
assert len(recv_data) == len(recv_procs)
if multi:
for pid,data in recv_data.iteritems():
assert data == (pid, rank)
else:
print rank, recv_data
for pid,data in recv_data.iteritems():
for pid2,data2 in data.iteritems():
assert data2 == (pid, pid2)
if __name__ == '__main__':
if num_procs == 2:
case2()
for multi in True,False:
for to_self in True,False:
if num_procs == 5:
case1(multi, to_self)
case3(multi, to_self)
case4(multi, to_self)
if num_procs > 1:
case5(multi, to_self)
| [
[
8,
0,
0.0851,
0.1631,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
7,
0,
0.195,
0.0426,
0,
0.66,
0.0833,
0,
0,
1,
0,
0,
0,
0,
1
],
[
1,
1,
0.1844,
0.0071,
1,
0.08,
... | [
"\"\"\" Test the share_data function for various cases\n\ncases to run are chosen based on the size of the MPI.COMM_wORLD\n\ncase 1: for 5 processes\nProcessors arrangement:\n 4\n0 1 2 3",
"try:\n import mpi4py.MPI as mpi\nexcept ImportError:\n import nose.plugins.skip as skip\n reason = \"mpi4py not in... |
"""
Simple script to check if the load balancing works on 1-d data.
"""
try:
import mpi4py.MPI as mpi
except ImportError:
import nose.plugins.skip as skip
reason = "mpi4py not installed"
raise skip.SkipTest(reason)
# mpi imports
from mpi4py import MPI
comm = MPI.COMM_WORLD
num_procs = comm.Get_size()
rank = comm.Get_rank()
# logging setup
# logging setup
import logging
logger = logging.getLogger()
log_file_name = '/tmp/log_pysph_'+str(rank)
logging.basicConfig(level=logging.DEBUG, filename=log_file_name,
filemode='w')
logger.addHandler(logging.StreamHandler())
# local imports
from pysph.base.particle_array import ParticleArray
from pysph.parallel.parallel_cell import ParallelCellManager
from pysph.solver.basic_generators import LineGenerator
from pysph.base.cell import INT_INF
from pysph.base.point import *
pcm = ParallelCellManager(initialize=False, dimension=1)
parray = ParticleArray(name='parray')
if rank == 0:
lg = LineGenerator(start_point=Point(0, 0, 0),
end_point=Point(1.0, 0, 0),
particle_spacing=0.01)
x, y, z = lg.get_coords()
parray.add_property({'name':'x', 'data':x})
parray.add_property({'name':'y', 'data':y})
parray.add_property({'name':'z', 'data':z})
parray.add_property({'name':'h'})
parray.align_particles()
parray.h[:] = 0.01
else:
parray.add_property({'name':'x'})
parray.add_property({'name':'y'})
parray.add_property({'name':'z'})
parray.add_property({'name':'h'})
pcm.add_array_to_bin(parray)
pcm.initialize()
| [
[
8,
0,
0.0339,
0.0508,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
7,
0,
0.1271,
0.1017,
0,
0.66,
0.05,
0,
0,
1,
0,
0,
0,
0,
1
],
[
1,
1,
0.1017,
0.0169,
1,
0.29,
... | [
"\"\"\"\nSimple script to check if the load balancing works on 1-d data.\n\"\"\"",
"try:\n import mpi4py.MPI as mpi\nexcept ImportError:\n import nose.plugins.skip as skip\n reason = \"mpi4py not installed\"\n raise skip.SkipTest(reason)",
" import mpi4py.MPI as mpi",
" import nose.plugins.s... |
""" Some checks for the parallel cell manager.
Run this script only with less than 5 processors.
example : mpiexec -n 2 python parallel_cell_check.py
"""
import time
try:
import mpi4py.MPI as mpi
except ImportError:
import nose.plugins.skip as skip
reason = "mpi4py not installed"
raise skip.SkipTest(reason)
# mpi imports
from mpi4py import MPI
comm = MPI.COMM_WORLD
num_procs = comm.Get_size()
#if num_procs > 4:
# raise SystemError, 'Start this script on less than 5 processors'
rank = comm.Get_rank()
# logging setup
import logging
logger = logging.getLogger()
#log_file_name = 'parallel_cell_check.log.'+str(rank)
#logging.basicConfig(level=logging.DEBUG, filename=log_file_name,
# filemode='w')
logger.setLevel(logging.DEBUG)
logger.addHandler(logging.StreamHandler())
# local imports
from pysph.base.particle_array import ParticleArray
from pysph.parallel.parallel_cell import ParallelCellManager
from pysph.solver.basic_generators import LineGenerator
from pysph.base.cell import INT_INF
from pysph.base.point import *
from pysph.parallel.load_balancer import LoadBalancer
from nose.plugins.attrib import attr
@attr(parallel=True)
def test():
pcm = ParallelCellManager(initialize=False)
# create 2 particles, one with proc 0 another with proc 1
lg = LineGenerator(particle_spacing=0.5)
lg.start_point.x = 0.0
lg.end_point.x = 10.0
lg.start_point.y = lg.start_point.z = 0.0
lg.end_point.y = lg.end_point.z = 0.0
x, y, z = lg.get_coords()
num_particles = len(x)
logger.info('Num particles : %d'%(len(x)))
parray = ParticleArray(name='p1',
x={'data':x},
y={'data':y},
z={'data':z},
h={'data':None, 'default':0.5})
# add parray to the cell manager
parray.add_property({'name':'u'})
parray.add_property({'name':'v'})
parray.add_property({'name':'w'})
parray.add_property({'name':'rho'})
parray.add_property({'name':'p'})
parray = LoadBalancer.distribute_particles(parray, num_procs, 1.0)[rank]
pcm.add_array_to_bin(parray)
np = pcm.arrays_to_bin[0].num_real_particles
nptot = comm.bcast(comm.reduce(np))
assert nptot == num_particles
pcm.initialize()
np = pcm.arrays_to_bin[0].num_real_particles
nptot = comm.bcast(comm.reduce(np))
assert nptot == num_particles
pcm.set_jump_tolerance(INT_INF())
logger.debug('%d: num_cells=%d'%(rank,len(pcm.cells_dict)))
logger.debug('%d:'%rank + ('\n%d '%rank).join([str(c) for c in pcm.cells_dict.values()]))
# on processor 0 move all particles from one of its cell to the next cell
if rank == 0:
cell = pcm.cells_dict.get(list(pcm.proc_map.cell_map.values()[0])[0])
logger.debug('Cell is %s'%(cell))
indices = []
cell.get_particle_ids(indices)
indices = indices[0]
logger.debug('Num particles in Cell is %d'%(indices.length))
parr = cell.arrays_to_bin[0]
x, y, z = parr.get('x', 'y', 'z', only_real_particles=False)
logger.debug(str(len(x)) + str(x))
logger.debug(str(indices.length) + str(indices.get_npy_array()))
for i in range(indices.length):
x[indices[i]] += cell.cell_size
parr.set_dirty(True)
pcm.update_status()
logger.debug('Calling cell manager update')
logger.debug('Is dirty %s'%(pcm.is_dirty))
pcm.update()
np = pcm.arrays_to_bin[0].num_real_particles
nptot = comm.bcast(comm.reduce(np))
assert nptot == num_particles
#logger.debug('hierarchy :%s'%(pcm.hierarchy_list))
logger.debug('cells : %s'%(pcm.cells_dict))
logger.debug('num particles : %d'%(parray.get_number_of_particles()))
logger.debug('real particles : %d'%(parray.num_real_particles))
| [
[
8,
0,
0.0244,
0.0407,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.0569,
0.0081,
0,
0.66,
0.0556,
654,
0,
1,
0,
0,
654,
0,
0
],
[
7,
0,
0.0935,
0.0488,
0,
0.66... | [
"\"\"\" Some checks for the parallel cell manager.\n\nRun this script only with less than 5 processors.\nexample : mpiexec -n 2 python parallel_cell_check.py\n\"\"\"",
"import time",
"try:\n import mpi4py.MPI as mpi\nexcept ImportError:\n import nose.plugins.skip as skip\n reason = \"mpi4py not install... |
#!/bin/env python
"""
Simple test for checking if the control tree is setup properly.
Run this script with the following command
mpiexec -n [num_procs] python controller_check.py
"""
try:
import mpi4py.MPI as mpi
except ImportError:
import nose.plugins.skip as skip
reason = "mpi4py not installed"
raise skip.SkipTest(reason)
# logging setup
import logging
logging.basicConfig(level=logging.DEBUG, filename='/tmp/log_pysph', filemode='a')
logger = logging.getLogger()
logger.addHandler(logging.StreamHandler())
from mpi4py import MPI
from pysph.parallel.parallel_controller import ParallelController
comm = MPI.COMM_WORLD
num_procs = comm.Get_size()
rank = comm.Get_rank()
logger.info('(%d)================controller_check====================='%(rank))
p = ParallelController()
assert p.rank == rank
if rank == 0:
assert p.parent_rank == -1
else:
if rank % 2 == 0:
assert p.parent_rank == ((rank)/2 -1)
else:
assert p.parent_rank == ((rank-1)/2)
if num_procs <= 2*rank + 1:
assert p.l_child_rank == -1
assert p.r_child_rank == -1
elif num_procs <= 2*rank + 2:
assert p.l_child_rank == 2*rank + 1
assert p.r_child_rank == -1
else:
assert p.l_child_rank == 2*rank + 1
assert p.r_child_rank == 2*rank + 2
logger.info('(%d)================controller_check====================='%(rank))
| [
[
8,
0,
0.0877,
0.1228,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
7,
0,
0.2193,
0.1053,
0,
0.66,
0.0667,
0,
0,
1,
0,
0,
0,
0,
1
],
[
1,
1,
0.193,
0.0175,
1,
0.96,
... | [
"\"\"\"\nSimple test for checking if the control tree is setup properly.\n\nRun this script with the following command\n\nmpiexec -n [num_procs] python controller_check.py\n\"\"\"",
"try:\n import mpi4py.MPI as mpi\nexcept ImportError:\n import nose.plugins.skip as skip\n reason = \"mpi4py not installed\... |
"""A parallel manager that uses blocks to partition the domain. At
every iteration, the particles are placed in large bins and these bins
are exchanged across processors.
"""
from parallel_controller import ParallelController
from parallel_manager import ParallelManager
from parallel_cell import share_data
from pysph.base.fast_utils import arange_long
from pysph.base.particle_array import ParticleArray, get_dummy_tag
from pysph.base.cell import py_construct_immediate_neighbor_list
from pysph.base.cell import CellManager
import numpy
# logger imports
import logging
logger = logging.getLogger()
class ProcessorMap(object):
"""The ProcessorMap determines neighboring processors and a list
of cells to send to each processor.
The main data used by the ProcessorMap is the `cells_dict`
corresponding to each processor's local binning. The cell
information is used to construct three dictionaries:
local_cell_map : A dictionary keyed on cell id and with the value
equal to the local processor rank that created this cell.
global_cell_map : A dictionary keyed on cell id and with value a
set of processor ranks that created this cell.
Two processors may own the same region in space and no attempt is
made to resolve this conflict. A suitable subclass may provide a
mechanism to do so.
"""
def __init__(self, parallel_controller=None):
"""Constructor.
Parameters:
-----------
parallel_controller : pysph.base.parallel.ParallelController
The controller object which manages the child and
parent processor ranks required for a global update.
"""
self.parallel_controller = parallel_controller
if parallel_controller is None:
self.parallel_controller = ParallelController()
self.rank = self.parallel_controller.rank
self.comm = self.parallel_controller.comm
self.local_cell_map = {}
self.global_cell_map = {}
self.conflicts = {}
def _local_update(self, cells_dict):
"""Update the local cell map.
The `local_cell_map` is a dictionary keyed on cell id with
value the rank of te local processor that created this cell.
"""
self.local_cell_map = {}
self.global_cell_map = {}
for cid, cell in cells_dict.iteritems():
self.local_cell_map[cid] = set( [self.rank] )
self.global_cell_map[cid] = set( [self.rank] )
def global_update(self, cells_dict):
"""Update the gglobal cell map.
The local cell maps from all processors are passed through the
tree and updated at each stage. After a call to this function,
every processor has the same gobal cell map.
The global cell map is keyed on cell id with value, a list of
processor ranks that created this cell.
"""
self._local_update(cells_dict)
self.conflicts = {}
pc = self.parallel_controller
comm = self.comm
# merge data from all children proc maps.
for c_rank in pc.children_proc_ranks:
c_cell_map = comm.recv(source=c_rank)
# merge the data
for cid in c_cell_map:
if cid in self.global_cell_map:
self.global_cell_map[cid].update( c_cell_map[cid] )
else:
self.global_cell_map[cid] = c_cell_map[cid]
# we now have partially merged data, send it to parent if not root.
if pc.parent_rank > -1:
comm.send(self.global_cell_map, dest=pc.parent_rank)
# receive updated proc map from parent
updated_cell_map = comm.recv(source=pc.parent_rank)
# update the global cell map
self.global_cell_map.clear()
self.global_cell_map.update( updated_cell_map )
# send updated data to children.
for c_rank in pc.children_proc_ranks:
comm.send(self.global_cell_map, dest=c_rank)
def get_cell_list_to_send(self):
"""Return a list of cells to send to each processor.
Neighboring cells are determined allowing for cells to be
shared across processors. The return value is a dictionary
keyed on processor id with value equal to the list of cells to
send that processor.
"""
local_map = self.local_cell_map
global_map = self.global_cell_map
pc = self.parallel_controller
cell_list_to_send = {}
for cid in local_map:
neighbor_ids = []
py_construct_immediate_neighbor_list(cid, neighbor_ids,
include_self=False)
# handle non-overlapping regions
for neighbor_id in neighbor_ids:
if neighbor_id in global_map:
owning_pids = list(global_map[neighbor_id])
for pid in owning_pids:
if not pid in cell_list_to_send:
cell_list_to_send[pid] = set([cid])
else:
cell_list_to_send[pid].update([cid])
# handle overlapping regions
conflicting_pids = list(global_map[cid])
if len(conflicting_pids) > 0:
for neighbor_id in neighbor_ids:
if neighbor_id in local_map:
for pid in conflicting_pids:
if not pid in cell_list_to_send:
cell_list_to_send[pid] = set([cid])
else:
cell_list_to_send[pid].update([cid])
return cell_list_to_send
def resolve_conflicts(self):
pass
class SimpleBlockManager(ParallelManager):
"""A parallel manager based on blocks.
Particles are binned locally with a bin/cell size equal to some
factor times the maximum smoothing length of the particles. The
resulting cell structure is used to determine neighboring
processors using the ProcessorMap and only a single layer of cells
is communicated.
"""
def __init__(self, block_scale_factor=6.0):
"""Constructor.
Parameters:
-----------
block_scale_factor : double
The scale factor to determine the bin size. The smoothing length
is chosen as: block_scale_factor * glb_max_h
The block_scale_factor should be greater than or equal to the
largest kernel radius for all possibly different kernels used
in a simulation.
"""
self.parallel_controller = ParallelController()
self.processor_map = ProcessorMap(self.parallel_controller)
self.rank = self.parallel_controller.rank
self.block_scale_factor=block_scale_factor
self.comm = self.parallel_controller.comm
self.size = self.parallel_controller.num_procs
self.rank = self.parallel_controller.rank
self.glb_bounds_min = [0, 0, 0]
self.glb_bounds_max = [0, 0, 0]
self.glb_min_h = 0
self.glb_max_h = 0
self.local_bounds_min = [0,0,0]
self.local_bounds_max = [0,0,0]
self.local_min_h = 0
self.local_max_h = 0
self.local_cell_map = {}
self.global_cell_map = {}
##########################################################################
# Public interface
##########################################################################
def initialize(self, particles):
"""Initialize the block manager.
The particle arrays are set and the cell manager is created
after the cell/block size is computed.
"""
self.particles = particles
self.arrays = particles.arrays
# setup the cell manager
self._set_dirty()
self._compute_block_size()
self._setup_cell_manager()
def update(self):
"""Parallel update.
After a call to this function, each processor has it's local
and remote particles necessary for a simulation.
"""
cm = self.cm
pmap = self.processor_map
# remove all remote particles
self._remove_remote_particles()
# bin the particles
self._rebin_particles()
# update cell map
pmap.global_update(cm.cells_dict)
# set the array pids
self._set_array_pid()
# exchange neighbor info
self._exchange_neighbor_particles()
# reset the arrays to dirty so locally we are unaffected
self._set_dirty()
def update_remote_particle_properties(self, props):
self.update()
###########################################################################
# Non public interface
###########################################################################
def _add_neighbor_particles(self, data):
"""Append remote particles to the local arrays.
Parameters:
-----------
data : dictionary
A dictionary keyed on processor id with value equal to a list of
particle arrays, corresponding to the local arrays in `arrays`
that contain remote particles from that processor.
"""
arrays = self.arrays
numarrays = len(arrays)
remote_particle_indices = []
for i in range(numarrays):
num_local = arrays[i].get_number_of_particles()
remote_particle_indices.append( [num_local, num_local] )
for pid in data:
if not pid == self.rank:
parray_list = data[pid]
for i in range(numarrays):
src = parray_list[i]
dst = arrays[i]
remote_particle_indices[i][1] += src.get_number_of_particles()
dst.append_parray(src)
self.remote_particle_indices = remote_particle_indices
def _get_communication_data(self, cell_list_to_send):
"""Get the particle array data corresponding to the cell list
that needs to be communicated. """
numarrays = len(self.arrays)
cm = self.cm
data = {}
for pid, cell_list in cell_list_to_send.iteritems():
parray_list = []
for i in range(numarrays):
parray_list.append(ParticleArray())
for cid in cell_list:
cell = cm.cells_dict[cid]
index_lists = []
cell.get_particle_ids(index_lists)
for i in range(numarrays):
src = self.arrays[i]
dst = parray_list[i]
index_array = index_lists[i]
pa = src.extract_particles(index_array)
# set the local and tag values
pa.local[:] = 0
pa.tag[:] = get_dummy_tag()
dst.append_parray(pa)
dst.set_name(src.name)
data[pid] = parray_list
return data
for cid, pids in send_cells_to.iteritems():
if len(pids) > 0:
parray_list = []
cell = cm.cells_dict[cid]
index_lists = []
cell.get_particle_ids(index_lists)
for i in range(numarrays):
parray_list.append( ParticleArray() )
src = self.arrays[i]
dst = parray_list[i]
index_array = index_lists[i]
pa = src.extract_particles(index_array)
# set the local and tag values
pa.local[:] = 0
pa.tag[:] = get_dummy_tag()
dst.append(pa)
dst.set_name(src.name)
for pid in pids:
to_send[pid] = parray_list
def _exchange_neighbor_particles(self):
"""Send the cells to neighboring processors."""
pc = self.parallel_controller
pmap = self.processor_map
cm = self.cm
# get the list of cells to send per processor from the processor map
cell_list_to_send = pmap.get_cell_list_to_send()
self.cell_list_to_send = cell_list_to_send
# get the actual particle data to send from the cell manager
data = self._get_communication_data(cell_list_to_send)
# share the data
recv = share_data(self.rank, data.keys(), data, pc.comm, multi=True)
# add the neighbor particles
self._add_neighbor_particles(recv)
def _rebin_particles(self):
"""Locally recompute the cell structure."""
cm = self.cm
# set the particle arrays to dirty
self._set_dirty()
# compute the block size
self._compute_block_size()
# set the cell size and bin
cm.cell_size = self.block_size
cm.rebin_particles()
# remove any empty cells
cm.delete_empty_cells()
def _compute_block_size(self):
"""Compute the block size.
The block size is chosen as some scale factor times the global
largest smoothing length.
"""
self._update_global_properties()
self.block_size = self.block_scale_factor*self.glb_max_h
def _setup_cell_manager(self):
"""Set the cell manager used for binning."""
self.cm = CellManager(arrays_to_bin=self.arrays,
min_cell_size=self.block_size,
max_cell_size=self.block_size,
initialize=True)
def _set_dirty(self):
"""Set the dirty bit for each particle array."""
for array in self.arrays:
array.set_dirty(True)
def _remove_remote_particles(self):
"""Remove all remote particles."""
for array in self.arrays:
to_remove = arange_long(array.num_real_particles,
array.get_number_of_particles())
array.remove_particles(to_remove)
def _set_array_pid(self):
"""Set the processor id for each particle array."""
for array in self.arrays:
array.set_pid(self.rank)
def _barrier(self):
"""Wait till all processors reach this point."""
self.parallel_controller.comm.barrier()
def _update_global_properties(self):
""" Exchange bound and smoothing length information among all
processors.
Notes:
------
At the end of this call, the global min and max values for the
coordinates and smoothing lengths are stored in the attributes
glb_bounds_min/max, glb_min/max_h
"""
data_min = {'x':0, 'y':0, 'z':0, 'h':0}
data_max = {'x':0, 'y':0, 'z':0, 'h':0}
for key in data_min.keys():
mi, ma = self._find_min_max_of_property(key)
data_min[key] = mi
data_max[key] = ma
self.local_bounds_min[0] = data_min['x']
self.local_bounds_min[1] = data_min['y']
self.local_bounds_min[2] = data_min['z']
self.local_bounds_max[0] = data_max['x']
self.local_bounds_max[1] = data_max['y']
self.local_bounds_max[2] = data_max['z']
self.local_min_h = data_min['h']
self.local_max_h = data_max['h']
pc = self.parallel_controller
glb_min, glb_max = pc.get_glb_min_max(data_min, data_max)
self.glb_bounds_min[0] = glb_min['x']
self.glb_bounds_min[1] = glb_min['y']
self.glb_bounds_min[2] = glb_min['z']
self.glb_bounds_max[0] = glb_max['x']
self.glb_bounds_max[1] = glb_max['y']
self.glb_bounds_max[2] = glb_max['z']
self.glb_min_h = glb_min['h']
self.glb_max_h = glb_max['h']
logger.info('(%d) bounds : %s %s'%(pc.rank, self.glb_bounds_min,
self.glb_bounds_max))
logger.info('(%d) min_h : %f, max_h : %f'%(pc.rank, self.glb_min_h,
self.glb_max_h))
def _find_min_max_of_property(self, prop_name):
""" Find the minimum and maximum of the property among all arrays
Parameters:
-----------
prop_name -- the property name to find the bounds for
"""
min = 1e20
max = -1e20
num_particles = 0
for arr in self.arrays:
if arr.get_number_of_particles() == 0:
continue
else:
num_particles += arr.get_number_of_particles()
min_prop = numpy.min(arr.get(prop_name))
max_prop = numpy.max(arr.get(prop_name))
if min > min_prop:
min = min_prop
if max < max_prop:
max = max_prop
return min, max
| [
[
8,
0,
0.0056,
0.0094,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.0131,
0.0019,
0,
0.66,
0.0833,
346,
0,
1,
0,
0,
346,
0,
0
],
[
1,
0,
0.015,
0.0019,
0,
0.66,... | [
"\"\"\"A parallel manager that uses blocks to partition the domain. At\nevery iteration, the particles are placed in large bins and these bins\nare exchanged across processors. \n\n\"\"\"",
"from parallel_controller import ParallelController",
"from parallel_manager import ParallelManager",
"from parallel_cel... |
"""API module to simplify import of common names from pysph.parallel package"""
from parallel_cell import ParallelCellManager, ProcessorMap
| [
[
8,
0,
0.2,
0.2,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.6,
0.2,
0,
0.66,
1,
568,
0,
2,
0,
0,
568,
0,
0
]
] | [
"\"\"\"API module to simplify import of common names from pysph.parallel package\"\"\"",
"from parallel_cell import ParallelCellManager, ProcessorMap"
] |
""" Contains class to perform load balancing using METIS[1]/SCOTCH[2]
[1] METIS: http://glaros.dtc.umn.edu/gkhome/views/metis
[2] SCOTCH: http://www.labri.fr/perso/pelegrin/scotch/
Note: Either of METIS/SCOTCH is acceptable. Installing one of these is enough.
First METIS is attempted to load and if it fails SCOTCH is tried. SCOTCH is
used in the METIS compatibility mode. Only the function `METIS_PartGraphKway`
is used from either of the libraries
"""
# logging imports
import logging
logger = logging.getLogger()
# local imports
from pysph.base.cell import py_construct_immediate_neighbor_list
from load_balancer_mkmeans import LoadBalancerMKMeans
import sys
import ctypes
from ctypes import c_int32 as c_int
if sys.platform.startswith('linux'):
try:
libmetis = ctypes.cdll.LoadLibrary('libmetis.so')
except OSError:
try:
libmetis = ctypes.cdll.LoadLibrary('libscotchmetis.so')
except OSError:
raise ImportError('could not load METIS library, try installing '
'METIS/SCOTCH and ensure it is in LD_LIBRARY_PATH')
elif sys.platform.startswith('win'):
try:
libmetis = ctypes.cdll.LoadLibrary('metis')
except OSError:
try:
libmetis = ctypes.cdll.LoadLibrary('scotchmetis')
except OSError:
raise ImportError('could not load METIS library, try installing '
'METIS/SCOTCH and ensure it is in LD_LIBRARY_PATH')
else:
raise ImportError('sorry, donno how to use ctypes (for METIS/SCOTCH'
'load_balancing) on non-linux/win platform, any help appreciated')
METIS_PartGraphKway = libmetis.METIS_PartGraphKway
c_int_p = ctypes.POINTER(c_int)
METIS_PartGraphKway.argtypes = [c_int_p, c_int_p, c_int_p, c_int_p, c_int_p,
c_int_p, c_int_p, c_int_p, c_int_p, c_int_p, c_int_p]
def cargs_from_wadj(xadj, adjncy, vwgt, bid_index, nparts):
""" return the ctype arguments for metis from the adjacency data
Parameters:
-----------
- xadj,adjncy,vwgt: lists containing adjacency data in CSR format as
required by :func:`METIS_PartGraphKway` (check METIS manual)
- bid_index: dict mapping bid to index in the adjacency data
- nparts: number of partitions to make of the graph
Returns:
--------
- n, xadj, adjncy, vwgt, adjwgt, wgtflag, numflag, nparts, options,
edgecut, part: the arguments for the :func:`METIS_PartGraphKway`
functions in ctype data format (all are pointers to c_int32)
"""
n = len(xadj)-1
c_n = (c_int*1)(n)
c_numflag = (c_int*1)()
c_adjwgt = None
c_nparts = (c_int*1)(nparts)
c_options = (c_int*5)()
c_edgecut = (c_int*1)()
c_part = (c_int*n)()
c_xadj = (c_int*(n+1))()
c_xadj[:] = xadj
c_adjncy = (c_int*len(adjncy))()
c_adjncy[:] = adjncy
if vwgt:
c_vwgt = (c_int*n)()
c_vwgt[:] = vwgt
c_wgtflag = (c_int*1)(2)
else:
c_vwgt = None
c_wgtflag = (c_int*1)(0)
return (c_n, c_xadj, c_adjncy, c_vwgt, c_adjwgt, c_wgtflag, c_numflag,
c_nparts, c_options, c_edgecut, c_part)
def wadj_from_adj_list(adj_list):
""" return vertex weights and adjacency information from adj_list
as returned by :func:`adj_list_from_blocks` """
bid_index = {}
xadj = [0]
adjncy = []
vwgt = []
for i,tmp in enumerate(adj_list):
bid_index[tmp[0]] = i
for bid, adjl, np in adj_list:
adjncy.extend((bid_index[b] for b in adjl))
xadj.append(len(adjncy))
vwgt.append(np)
return xadj, adjncy, vwgt, bid_index
def adj_list_from_blocks(block_proc, proc_block_np):
""" return adjacency list information for use by METIS partitioning
Arguments:
----------
- block_proc: dict mapping bid:proc
- proc_block_map: list of dict bid:np, in sequence of the process to
which block belongs
Returns:
--------
- adj_list: list of 3-tuples, one for each block in proc-block_np
The 3-tuple consists of (bid, adjacent bids, num_particles in bid)
"""
adj_list = []
nbrs = []
i = 0
for blocks in proc_block_np:
for bid, np in blocks.iteritems():
nbrs[:] = []
adjl = []
py_construct_immediate_neighbor_list(bid, nbrs, False)
for nbr in nbrs:
if nbr in block_proc:
adjl.append(nbr)
adj_list.append((bid, adjl, np))
i += 1
return adj_list
def lb_metis(block_proc, proc_block_np):
""" Partition the blocks in proc_block_np using METIS
Arguments:
----------
- block_proc: dict mapping bid:proc
- proc_block_map: list of dict bid:np, in sequence of the process to
which block belongs
Returns:
--------
- block_proc: dict mapping bid:proc for the new partitioning generated
by METIS
"""
adj_list = adj_list_from_blocks(block_proc, proc_block_np)
xadj, adjncy, vwgt, bid_index = wadj_from_adj_list(adj_list)
c_args = cargs_from_wadj(xadj, adjncy, vwgt, bid_index, len(proc_block_np))
METIS_PartGraphKway(*c_args)
ret = c_args[-1]
ret_block_proc = {}
for bid,bindex in bid_index.iteritems():
ret_block_proc[bid] = ret[bindex]
return ret_block_proc
###############################################################################
# `LoadBalancerMetis` class.
###############################################################################
class LoadBalancerMetis(LoadBalancerMKMeans):
    """Load balancer that delegates the serial repartitioning to METIS."""
    def __init__(self, **args):
        LoadBalancerMKMeans.__init__(self, **args)
        self.method = 'serial_metis'
    def load_balance_func_serial_metis(self, **args):
        """ Serial load balance function which uses METIS to do the
        partitioning; calls the :class:Loadbalancer
        :meth:`load_balance_func_serial`
        """
        self.load_balance_func_serial('metis', **args)
    def load_redistr_metis(self, block_proc, proc_block_np, **args):
        """ Redistribute the cells amongst processes using METIS.
        This is called by :class:Loadbalancer :meth:`load_balance_func_serial`
        """
        block_proc = lb_metis(block_proc, proc_block_np)
        # merge the per-process bid:np dicts into a single mapping
        merged_np = {}
        for per_proc in proc_block_np:
            merged_np.update(per_proc)
        # tally the particle count now assigned to each process
        self.particles_per_proc = [0] * len(proc_block_np)
        for bid, owner in block_proc.iteritems():
            self.particles_per_proc[owner] += merged_np[bid]
        self.balancing_done = True
        return block_proc, self.particles_per_proc
###############################################################################
| [
[
8,
0,
0.0281,
0.051,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.0663,
0.0051,
0,
0.66,
0.0625,
715,
0,
1,
0,
0,
715,
0,
0
],
[
14,
0,
0.0714,
0.0051,
0,
0.66... | [
"\"\"\" Contains class to perform load balancing using METIS[1]/SCOTCH[2]\n\n[1] METIS: http://glaros.dtc.umn.edu/gkhome/views/metis\n[2] SCOTCH: http://www.labri.fr/perso/pelegrin/scotch/\n\nNote: Either of METIS/SCOTCH is acceptable. Installing one of these is enough.\nFirst METIS is attempted to load and if it f... |
import pysph.base.api as base
import pysph.solver.api as solver
import pysph.sph.api as sph
if solver.HAS_CL:
import pyopencl as cl
else:
try:
import nose.plugins.skip as skip
reason = "PyOpenCL not installed"
raise skip.SkipTest(reason)
except ImportError:
pass
import numpy
import unittest
from os import path
# Short aliases for the enumerations of the domain-manager and OpenCL
# neighbor-locator types used when constructing CLParticles below.
CLDomain = base.DomainManagerType
CLLocator = base.OpenCLNeighborLocatorType
class FunctionTestCase(unittest.TestCase):
    """ Simple test for the NBodyForce.

    Runs the same SPH function through the Cython calc (SPHCalc) and
    the OpenCL calc (CLCalc) and compares both results against a
    hand-computed reference solution.  Sub-classes presumably set
    ``self.func`` in the :meth:`setup` hook (it is read by
    :meth:`setup_calcs`) and override :meth:`get_reference_solution`
    -- confirm against the concrete test cases.
    """
    def runTest(self):
        # placeholder so the TestCase can be instantiated directly
        pass
    def setUp(self):
        """ The setup consists of four particles placed at the
        vertices of a unit square. The force function to be tested is:

        ..math::

        f_i = \sum_{j=1}^{4} \frac{m_j}{|x_j - x_i|^3 +
        \eps}(x_j - x_i)

        The mass of each particle is 1
        """
        self.np = 4
        # define the particle properties here
        x = numpy.array([0, 0, 1, 1], numpy.float64)
        y = numpy.array([0, 1, 1, 0], numpy.float64)
        z = numpy.zeros_like(x)
        m = numpy.ones_like(x)
        u = numpy.array([1, 0, 0, -1], numpy.float64)
        p = numpy.array([0, 0, 1, 1], numpy.float64)
        self.kernel = base.CubicSplineKernel(dim=2)
        # create a ParticleArray with double precision
        self.pa = pa = base.get_particle_array(name="test", x=x, y=y, z=z,
                                               m=m, u=u, p=p)
        # create a particles instance
        self.particles = base.Particles([pa,])
        # OpenCL counterpart using the all-pair neighbor locator
        self.cl_particles = base.CLParticles(
            arrays=[self.pa,],
            domain_manager_type=CLDomain.DomainManager,
            cl_locator_type=CLLocator.AllPairNeighborLocator)
        # define the function here
        #self.func = func = sph.NBodyForce.get_func(pa, pa)
        if solver.HAS_CL:
            self.ctx = ctx = solver.create_some_context()
            self.q = q = cl.CommandQueue(ctx)
        self.setup()
    def setup(self):
        # hook for sub-classes to perform additional setup
        pass
    def get_reference_solution(self):
        """ Evaluate the force on each particle manually """
        # Define the reference solution here
        raise NotImplementedError
    def setup_calcs(self):
        """ Create the Cython and OpenCL calcs for self.func. """
        pa = self.pa
        # create a Cython Calc
        calc = sph.SPHCalc( self.particles, [pa,], pa,
                            self.kernel, [self.func,], ['rho'] )
        self.calc = calc
        # create an OpenCL Calc
        cl_calc = sph.CLCalc( self.cl_particles, [pa,], pa,
                              self.kernel, [self.func,], ['rho'] )
        self.cl_calc = cl_calc
    def _test(self, precision, nd):
        """ Test the PySPH solution.

        precision -- OpenCL precision string passed to the array
        nd -- number of decimal places for assertAlmostEqual
        """
        pa = self.pa
        pa.set_cl_precision(precision)
        # setup the calcs
        self.setup_calcs()
        # setup OpenCL
        self.cl_calc.setup_cl(self.ctx)
        # get the reference solution
        reference_solution = self.get_reference_solution()
        self.calc.sph()
        cython_tmpx = pa._tmpx.copy()
        cython_tmpy = pa._tmpy.copy()
        cython_tmpz = pa._tmpz.copy()
        # poison the outputs so stale Cython results cannot be
        # mistaken for the OpenCL output read back below
        pa._tmpx[:] = -1
        pa._tmpy[:] = -1
        pa._tmpz[:] = -1
        self.cl_calc.sph()
        pa.read_from_buffer()
        opencl_tmpx = pa._tmpx
        opencl_tmpy = pa._tmpy
        opencl_tmpz = pa._tmpz
        # both implementations must agree with the reference to nd places
        for i in range(self.np):
            self.assertAlmostEqual(reference_solution[i].x, cython_tmpx[i],nd)
            self.assertAlmostEqual(reference_solution[i].y, cython_tmpy[i],nd)
            self.assertAlmostEqual(reference_solution[i].z, cython_tmpz[i],nd)
            self.assertAlmostEqual(reference_solution[i].x, opencl_tmpx[i],nd)
            self.assertAlmostEqual(reference_solution[i].y, opencl_tmpy[i],nd)
            self.assertAlmostEqual(reference_solution[i].z, opencl_tmpz[i],nd)
| [
[
1,
0,
0.0071,
0.0071,
0,
0.66,
0,
212,
0,
1,
0,
0,
212,
0,
0
],
[
1,
0,
0.0143,
0.0071,
0,
0.66,
0.1111,
683,
0,
1,
0,
0,
683,
0,
0
],
[
1,
0,
0.0214,
0.0071,
0,
... | [
"import pysph.base.api as base",
"import pysph.solver.api as solver",
"import pysph.sph.api as sph",
"if solver.HAS_CL:\n import pyopencl as cl\n\nelse:\n try:\n import nose.plugins.skip as skip\n reason = \"PyOpenCL not installed\"\n raise skip.SkipTest(reason)",
" import pyop... |
"""
Module containing some data required for tests of the sph module.
"""
# standard imports
import numpy
# local imports
from pysph.base.particle_array import *
def generate_sample_dataset_1():
    """
    Generate test data: a single 3x3 grid of unit-mass particles in
    the z=0 plane.  Look at image sph_test_data1.png
    """
    x = numpy.array([-1.0, 0.0, 1.0, -1.0, 0.0, 1.0, -1.0, 0.0, 1.0])
    y = numpy.array([-1.0, -1.0, -1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0])
    z = numpy.zeros(9)
    h = 1.1 * numpy.ones(9)
    m = numpy.ones(9)
    rho = numpy.ones(9)
    u = numpy.zeros(9)
    v = numpy.zeros(9)
    w = numpy.zeros(9)
    props = {'x': {'data': x}, 'y': {'data': y}, 'z': {'data': z},
             'h': {'data': h}, 'm': {'data': m}, 'rho': {'data': rho},
             'velx': {'data': u}, 'v': {'data': v}, 'w': {'data': w}}
    parr1 = ParticleArray(name='parr1', **props)
    return [parr1]
def generate_sample_dataset_2():
    """
    Generate test data: the eight corners of a unit cube plus its
    center.  Look at image sph_test_data2.png.
    """
    x = numpy.array([0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.5])
    y = numpy.array([0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.5])
    z = numpy.array([0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.5])
    h = numpy.ones(9)
    m = numpy.ones(9)
    rho = numpy.ones(9)
    t = numpy.zeros(9)
    props = {'x': {'data': x}, 'y': {'data': y}, 'z': {'data': z},
             'm': {'data': m}, 'rho': {'data': rho}, 'h': {'data': h},
             't': {'data': t}}
    return [ParticleArray(name='parr1', **props)]
| [
[
8,
0,
0.0328,
0.0492,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.082,
0.0164,
0,
0.66,
0.25,
954,
0,
1,
0,
0,
954,
0,
0
],
[
1,
0,
0.1311,
0.0164,
0,
0.66,
... | [
"\"\"\"\nModule containing some data required for tests of the sph module.\n\"\"\"",
"import numpy",
"from pysph.base.particle_array import *",
"def generate_sample_dataset_1():\n \"\"\"\n Generate test test data.\n Look at image sph_test_data1.png\n \"\"\"\n x = numpy.array([-1.0, 0.0, 1.0, -1... |
"""API module to simplify import of common names from pysph.sph package"""
#Import from calc
from sph_calc import SPHCalc, CLCalc
from sph_func import SPHFunction, SPHFunctionParticle, CSPHFunctionParticle
############################################################################
# IMPORT FUNCTIONS
############################################################################
#Import basic functions
from funcs.basic_funcs import SPHGradient, \
SPHLaplacian, CountNeighbors, SPH as SPHInterpolation,\
VelocityGradient3D, VelocityGradient2D
#Import boundary functions
from funcs.boundary_funcs import MonaghanBoundaryForce, LennardJonesForce, \
BeckerBoundaryForce
#Import density functions
from funcs.density_funcs import SPHRho, SPHDensityRate
#Import Energy functions
from funcs.energy_funcs import EnergyEquation, EnergyEquationAVisc,\
EnergyEquationNoVisc, ArtificialHeat, \
EnergyEquationWithSignalBasedViscosity
#Import viscosity functions
from funcs.viscosity_funcs import MonaghanArtificialViscosity, \
MorrisViscosity, MomentumEquationSignalBasedViscosity
#Import pressure functions
from funcs.pressure_funcs import SPHPressureGradient, MomentumEquation
#Position Steppers
from funcs.position_funcs import PositionStepping
#Import XSPH functions
from funcs.xsph_funcs import XSPHDensityRate, XSPHCorrection
#Import Equation of state functions
from funcs.eos_funcs import IdealGasEquation, TaitEquation, \
IsothermalEquation, MieGruneisenEquation
#Import external force functions
from funcs.external_force import GravityForce, VectorForce, MoveCircleX,\
MoveCircleY, NBodyForce
#Import ADKE functions
from funcs.adke_funcs import ADKEPilotRho, ADKESmoothingUpdate,\
SPHVelocityDivergence as VelocityDivergence, ADKEConductionCoeffUpdate,\
SetSmoothingLength
# Import stress functions
from funcs.stress_funcs import HookesDeviatoricStressRate2D, \
HookesDeviatoricStressRate3D, MomentumEquationWithStress2D,\
MonaghanArtificialStress, MonaghanArtStressAcc, \
EnergyEquationWithStress2D, VonMisesPlasticity2D
from funcs.stress_funcs import get_K, get_nu, get_G
# Import test funcs
from funcs.test_funcs import ArtificialPotentialForce
# Import GSPH funcs
from funcs.gsph_funcs import GSPHMomentumEquation, GSPHEnergyEquation,\
GSPHPositionStepping
############################################################################
| [
[
8,
0,
0.0145,
0.0145,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.058,
0.0145,
0,
0.66,
0.0588,
671,
0,
2,
0,
0,
671,
0,
0
],
[
1,
0,
0.0725,
0.0145,
0,
0.66,... | [
"\"\"\"API module to simplify import of common names from pysph.sph package\"\"\"",
"from sph_calc import SPHCalc, CLCalc",
"from sph_func import SPHFunction, SPHFunctionParticle, CSPHFunctionParticle",
"from funcs.basic_funcs import SPHGradient, \\\n SPHLaplacian, CountNeighbors, SPH as SPHInterpolation,... |
import numpy
from pysph.solver.cl_utils import cl_read, get_real, HAS_CL, get_pysph_root,\
create_some_context, enqueue_copy
import pysph.solver.cl_utils as clu
if HAS_CL:
import pyopencl as cl
mf = cl.mem_flags
# Cython functions for neighbor list construction
from nnps_util import cbin, unflatten
from point import Point
from cell import py_find_cell_id
# radix sort class
from radix_sort import AMDRadixSort, NvidiaRadixSort
class DomainManagerType:
    """Enumeration of the available domain-manager implementations."""
    # plain base class (no spatial indexing)
    DomainManager = 0
    # bin + linked-list based indexing (LinkedListManager below)
    LinkedListManager = 1
    # radix-sort based indexing (RadixSortManager below)
    RadixSortManager = 2
class DomainManager:
    """Base class for spatial-indexing managers over ParticleArrays.

    A DomainManager owns a list of ParticleArrays, validates them
    (non-empty, unique names, one common ``cl_precision``) and
    optionally sets up an OpenCL context/queue for them.  Sub-classes
    implement the actual indexing in :meth:`update` and cell iteration
    through the iterator protocol.
    """
    def __init__(self, arrays, context=None, with_cl=True, device='CPU'):
        """Construct the manager.

        Parameters:
        -----------
        arrays -- list
            The ParticleArrays being managed.
        context -- pyopencl.Context or None
            Optional pre-created OpenCL context.
        with_cl -- bool
            If True, set up OpenCL (requires PyOpenCL).
        device -- str
            'CPU' or 'GPU'; used only when a context is created here.

        Raises RuntimeError for an empty array list, duplicate array
        names, mixed precision, or a missing PyOpenCL installation.
        """
        if len(arrays) == 0:
            raise RuntimeError("No Arrays provided!")
        self.arrays = arrays
        self.narrays = narrays = len(arrays)
        # check that the arrays have unique names and a single common
        # precision.  The set-based check also catches duplicates that
        # are not adjacent in the input list.
        if narrays > 1:
            seen = set()
            for array in arrays:
                if array.name in seen:
                    msg = "You must provide arrays with unique names!"
                    raise RuntimeError(msg)
                seen.add(array.name)
                if array.cl_precision != arrays[0].cl_precision:
                    msg = "Arrays cannot have different precision!"
                    raise RuntimeError(msg)
        # set the cl_precision (shared by all arrays)
        self.cl_precision = arrays[0].cl_precision
        # setup OpenCL
        if with_cl:
            if HAS_CL:
                self.with_cl = True
                self._setup_cl(context, device)
            else:
                raise RuntimeError("PyOpenCL not found!")
        else:
            self.with_cl = False
    #######################################################################
    # public interface
    #######################################################################
    def update(self):
        """Rebuild the spatial index; overridden by sub-classes."""
        pass
    #######################################################################
    # object interface
    #######################################################################
    def __iter__(self):
        """The Domain manager produces an iterator for all its data.

        This is needed as the function that will ask for cell
        neighbors should be agnostic about the DomainManager type and
        simply requires a means to iterate through its data.
        """
        return self
    def next(self):
        # the base class has no cells to iterate over
        raise RuntimeError("Do not iterate over the DomainManager base class!")
    ###########################################################################
    # non-public interface
    ###########################################################################
    def _setup_cl(self, context=None, device=None):
        """ OpenCL setup: create (or adopt) a context, create the
        command queue, allocate the device buffers of every array and
        build the program. """
        if not context:
            if device=='GPU' or device=='gpu':
                self.context = context = clu.create_context_from_gpu()
            else:
                self.context = context = clu.create_context_from_cpu()
        else:
            self.context = context
        self.queue = queue = cl.CommandQueue(context)
        # allocate the particle array device buffers
        for i in range(self.narrays):
            pa = self.arrays[i]
            pa.setup_cl(context, queue)
        # create the program
        self._setup_program()
    def _find_bounds(self):
        """ Find the bounds for the particle arrays.

        The bounds calculated are the simulation cube, defined by the
        minimum and maximum extents of the particle arrays and the
        maximum smoothing length which is used for determining a safe
        cell size for binning.
        """
        inf = numpy.inf
        mx, my, mz = inf, inf, inf
        Mx, My, Mz = -inf, -inf, -inf
        Mh = 0.0
        # update the minimum and maximum for the particle arrays
        # NOTE(review): read_from_buffer is invoked even when with_cl
        # is False -- presumably a no-op for host-only arrays; confirm.
        for pa in self.arrays:
            pa.read_from_buffer()
            pa.update_min_max(props=['x','y','z','h'])
            if pa.properties['x'].minimum < mx:
                mx = get_real( pa.properties['x'].minimum, self.cl_precision )
            if pa.properties['y'].minimum < my:
                my = get_real( pa.properties['y'].minimum, self.cl_precision )
            if pa.properties['z'].minimum < mz:
                mz = get_real( pa.properties['z'].minimum, self.cl_precision )
            if pa.properties['x'].maximum > Mx:
                Mx = get_real( pa.properties['x'].maximum, self.cl_precision )
            if pa.properties['y'].maximum > My:
                My = get_real( pa.properties['y'].maximum, self.cl_precision )
            if pa.properties['z'].maximum > Mz:
                Mz = get_real( pa.properties['z'].maximum, self.cl_precision )
            if pa.properties['h'].maximum > Mh:
                Mh = get_real( pa.properties['h'].maximum, self.cl_precision )
        self.mx, self.my, self.mz = mx, my, mz
        self.Mx, self.My, self.Mz = Mx, My, Mz
        self.Mh = Mh
        self._set_cell_size()
        self._find_num_cells()
    def _set_cell_size(self):
        """ Set the cell size for binning.

        Notes:
        ------
        If the cell size is being chosen based on the particle
        smoothing lengths, we choose a cell size slightly larger than
        k*h, where k is the maximum scale factor for the SPH kernel.
        Currently we use the size (k + 1)*max(h).
        If a constant bin size is provided, it is used as-is.
        """
        if not self.const_cell_size:
            self.cell_size = get_real((self.kernel_scale_factor+1)*self.Mh,
                                      self.cl_precision)
        else:
            self.cell_size = self.const_cell_size
    def _find_num_cells(self):
        """ Find the number of cells in each coordinate direction.

        The number of cells is found from the simulation bounds and
        the cell size used for binning.
        """
        max_pnt = Point(self.Mx, self.My, self.Mz)
        max_cid = py_find_cell_id(max_pnt, self.cell_size)
        min_pnt = Point(self.mx, self.my, self.mz)
        min_cid = py_find_cell_id(min_pnt, self.cell_size)
        self.ncx = numpy.int32(max_cid.x - min_cid.x + 1)
        self.ncy = numpy.int32(max_cid.y - min_cid.y + 1)
        self.ncz = numpy.int32(max_cid.z - min_cid.z + 1)
        # minimum cell indices, used as offsets when flattening ids
        self.mcx = numpy.int32(min_cid.x)
        self.mcy = numpy.int32(min_cid.y)
        self.mcz = numpy.int32(min_cid.z)
        self.ncells = numpy.int32(self.ncx * self.ncy * self.ncz)
    def _setup_program(self):
        # sub-classes compile their OpenCL program here
        pass
class LinkedListManager(DomainManager):
    """ Domain manager using bins as the indexing scheme and a linked
    list as the neighbor locator scheme.

    Data Attributes:
    ----------------
    arrays : list
        The particle arrays handled by the manager
    head : dict
        Head arrays for each ParticleArray maintained.
        The dictionary is keyed on name of the ParticleArray,
        with the head array as value.
    Next : dict
        Next array for each ParticleArray maintained.
        The dictionary is keyed on name of the ParticleArray,
        with the next array as value.
    const_cell_size : REAL
        Optional constant cell size used for binning.
    cell_size : REAL
        Cell size used for binning.
    cl_precision : string
        OpenCL precision to use. This is taken from the ParticleArrays
    Mx, mx, My, my, Mz, mz -- REAL
        Global bounds for the binning
    ncx, ncy, ncz -- uint
        Number of cells in each coordinate direction
    ncells -- uint
        Total number of cells : (ncx * ncy * ncz)
    with_cl -- bool
        Flag to use OpenCL for the neighbor list generation.
    """
    def __init__(self, arrays, cell_size=None, context=None,
                 kernel_scale_factor = 2.0, with_cl=True):
        """ Construct a linked list manager.

        Parameters:
        ------------
        arrays -- list
            The ParticleArrays being managed.
        cell_size -- REAL
            The optional bin size to use
        kernel_scale_factor -- REAL
            the scale factor for the radius
        with_cl -- bool
            Explicitly choose OpenCL

        A LinkedListManager constructs and maintains a linked list for
        a list of particle arrays. The linked list data structure
        consists of two arrays per particle array:

        head : An integer array of size ncells, where ncells is the
        total number of cells in the domain. Each entry points to the
        index of a particle belonging to the cell. A negative index
        (-1) indicates an empty cell.

        next : An integer array of size num_particles. Each entry
        points to the next particle in the same cell. A negative index
        (-1) indicates no more particles.

        The bin size, if provided, is constant in each coordinate
        direction. The default choice for the bin size is twice the
        maximum smoothing length for all particles in the domain.
        """
        DomainManager.__init__(self, arrays, context, with_cl)
        # set the kernel scale factor
        self.kernel_scale_factor = kernel_scale_factor
        # set the cell size; a falsy cell_size (None or 0) means it is
        # derived from the smoothing lengths in _set_cell_size
        self.const_cell_size = cell_size
        if cell_size:
            self.const_cell_size = get_real(cell_size, self.cl_precision)
        # find global bounds (simulation box and ncells)
        self._find_bounds()
        # The linked list structures for the arrays.
        self.Next = {}
        self.head = {}
        self.cellids = {}
        self.locks = {}
        self.indices = {}
        self.ix = {}
        self.iy = {}
        self.iz = {}
        # device linked list structures
        self.dnext = {}
        self.dhead = {}
        self.dcellids = {}
        self.dlocks = {}
        self.dindices = {}
        self.dix = {}
        self.diy = {}
        self.diz = {}
        # dict for kernel launch parameters
        self.global_sizes = {}
        self.local_sizes = {}
        # initialize counter for the iterator
        self._current_cell = 0
        # initialize the linked list
        self._init_linked_list()
    #######################################################################
    # public interface
    #######################################################################
    def update(self):
        """ Update the linked list """
        # find the bounds for the manager
        self._find_bounds()
        # reset the data structures
        self._init_linked_list()
        # update the data structures
        if self.with_cl:
            self._cl_update()
        else:
            self._cy_update()
    def enqueue_copy(self):
        """ Copy the Buffer contents to the host.

        The buffers copied are
        cellids, head, next, dix, diy, diz
        """
        if self.with_cl:
            for pa in self.arrays:
                enqueue_copy(self.queue, dst=self.cellids[pa.name],
                             src=self.dcellids[pa.name])
                enqueue_copy(self.queue, dst=self.head[pa.name],
                             src=self.dhead[pa.name])
                enqueue_copy(self.queue, dst=self.Next[pa.name],
                             src=self.dnext[pa.name])
                enqueue_copy(self.queue, dst=self.ix[pa.name],
                             src=self.dix[pa.name])
                enqueue_copy(self.queue, dst=self.iy[pa.name],
                             src=self.diy[pa.name])
                enqueue_copy(self.queue, dst=self.iz[pa.name],
                             src=self.diz[pa.name])
    ###########################################################################
    # non-public interface
    ###########################################################################
    def _init_linked_list(self):
        """ Initialize the linked list dictionaries to store the
        particle neighbor information.

        Three arrays, namely, head, next and cellids are created per
        particle array.
        """
        ncells = self.ncells
        for i in range(self.narrays):
            pa = self.arrays[i]
            np = pa.get_number_of_particles()
            # head/next start at -1: empty cells / end of chain
            head = numpy.ones(ncells, numpy.int32) * numpy.int32(-1)
            next = numpy.ones(np, numpy.int32) * numpy.int32(-1)
            cellids = numpy.ones(np, numpy.uint32)
            locks = numpy.zeros(ncells, numpy.int32)
            indices = numpy.arange(np, dtype=numpy.uint32)
            ix = numpy.ones(np, numpy.uint32)
            iy = numpy.ones(np, numpy.uint32)
            iz = numpy.ones(np, numpy.uint32)
            self.head[pa.name] = head
            self.Next[pa.name] = next
            self.cellids[pa.name] = cellids
            self.locks[pa.name] = locks
            self.indices[pa.name] = indices
            self.ix[pa.name] = ix
            self.iy[pa.name] = iy
            self.iz[pa.name] = iz
        if self.with_cl:
            self._init_device_buffers()
    def _init_device_buffers(self):
        """ Initialize the device buffers """
        for i in range(self.narrays):
            pa = self.arrays[i]
            np = pa.get_number_of_particles()
            # initialize the kernel launch parameters
            self.global_sizes[pa.name] = (np,)
            self.local_sizes[pa.name] = (1,)
            head = self.head[pa.name]
            next = self.Next[pa.name]
            cellids = self.cellids[pa.name]
            locks = self.locks[pa.name]
            indices = self.indices[pa.name]
            ix = self.ix[pa.name]
            iy = self.iy[pa.name]
            iz = self.iz[pa.name]
            dhead = cl.Buffer(self.context,
                              mf.READ_WRITE | mf.COPY_HOST_PTR, hostbuf=head)
            dnext = cl.Buffer(self.context,
                              mf.READ_WRITE | mf.COPY_HOST_PTR, hostbuf=next)
            dcellids = cl.Buffer(self.context,
                                 mf.READ_WRITE | mf.COPY_HOST_PTR,
                                 hostbuf=cellids)
            dlocks = cl.Buffer(self.context,
                               mf.READ_WRITE | mf.COPY_HOST_PTR,
                               hostbuf=locks)
            dindices = cl.Buffer(self.context,
                                 mf.READ_WRITE | mf.COPY_HOST_PTR,
                                 hostbuf=indices)
            dix = cl.Buffer(self.context,
                            mf.READ_WRITE | mf.COPY_HOST_PTR,
                            hostbuf=ix)
            diy = cl.Buffer(self.context,
                            mf.READ_WRITE | mf.COPY_HOST_PTR,
                            hostbuf=iy)
            diz = cl.Buffer(self.context,
                            mf.READ_WRITE | mf.COPY_HOST_PTR,
                            hostbuf=iz)
            self.dhead[pa.name] = dhead
            self.dnext[pa.name] = dnext
            self.dcellids[pa.name] = dcellids
            self.dlocks[pa.name] = dlocks
            self.dindices[pa.name] = dindices
            self.dix[pa.name] = dix
            self.diy[pa.name] = diy
            self.diz[pa.name] = diz
    def _cy_update(self):
        """ Construct the linked lists for the particle arrays using Cython"""
        ncx, ncy, ncz = self.ncx, self.ncy, self.ncz
        mx, my, mz = self.mx, self.my, self.mz
        # NOTE(review): the next assignment is immediately overwritten
        # by the get_real conversion below and has no effect.
        cell_size = self.cell_size
        cell_size = get_real(self.cell_size, self.cl_precision)
        for i in range(self.narrays):
            pa = self.arrays[i]
            np = pa.get_number_of_particles()
            x, y, z = pa.get('x','y','z')
            if self.cl_precision == 'single':
                # astype returns single-precision copies; the arrays
                # held by the ParticleArray itself are not modified
                x = x.astype(numpy.float32)
                y = y.astype(numpy.float32)
                z = z.astype(numpy.float32)
            cbin( x, y, z,
                  self.cellids[pa.name],
                  self.ix[pa.name],
                  self.iy[pa.name],
                  self.iz[pa.name],
                  self.head[pa.name],
                  self.Next[pa.name],
                  mx, my, mz,
                  numpy.int32(ncx), numpy.int32(ncy), numpy.int32(ncz),
                  cell_size, numpy.int32(np),
                  self.mcx, self.mcy, self.mcz
                  )
    def _cl_update(self):
        """ Construct the linked lists for the particle arrays using OpenCL"""
        for i in range(self.narrays):
            pa = self.arrays[i]
            x = pa.get_cl_buffer('x')
            y = pa.get_cl_buffer('y')
            z = pa.get_cl_buffer('z')
            # Bin particles
            self.prog.bin( self.queue,
                           self.global_sizes[pa.name],
                           self.local_sizes[pa.name],
                           x, y, z,
                           self.dcellids[pa.name],
                           self.dix[pa.name],
                           self.diy[pa.name],
                           self.diz[pa.name],
                           self.mx,
                           self.my,
                           self.mz,
                           self.ncx,
                           self.ncy,
                           self.ncz,
                           self.cell_size,
                           self.mcx,
                           self.mcy,
                           self.mcz
                           ).wait()
            # build head/next from the cellids computed above
            self.prog.construct_neighbor_list(self.queue,
                                              self.global_sizes[pa.name],
                                              self.local_sizes[pa.name],
                                              self.dcellids[pa.name],
                                              self.dhead[pa.name],
                                              self.dnext[pa.name],
                                              self.dlocks[pa.name]
                                              ).wait()
    def _setup_program(self):
        """ Read the OpenCL kernel source file and build the program """
        src_file = get_pysph_root() + '/base/linked_list.cl'
        src = cl_read(src_file, precision=self.cl_precision)
        self.prog = cl.Program(self.context, src).build()
    #######################################################################
    # object interface
    #######################################################################
    def next(self):
        """Iterator interface to get cell neighbors.

        Usage:
        ------
        for cell_nbrs in LinkedListManager():
            ...

        where, the length of the iterator is `ncells` and at each call,
        the `forward` neighbors for the cell are returned.

        The `forward` cells for a given cell with index cid are
        neighboring cells with an index cid' >= cid
        """
        if self._current_cell == self.ncells:
            self._current_cell = 0
            raise StopIteration
        else:
            # we are getting neighbors for the current cell
            cid = self._current_cell
            # get the cell indices for the current cell to search for
            ncx = self.ncx
            ncy = self.ncy
            ncz = self.ncz
            ix, iy, iz = unflatten(cid, ncx, ncy)
            # determine the range of search
            imin = max(ix -1, 0)
            jmin = max(iy -1, 0)
            kmin = max(iz -1, 0)
            imax = min(ix + 2, ncx)
            jmax = min(iy + 2, ncy)
            kmax = min(iz + 2, ncz)
            # raise the counter for the current cell
            self._current_cell += 1
            # flattened index is i + j*ncx + k*ncx*ncy; only forward
            # neighbors (flattened id >= cid) are returned
            return [i+j*ncx+k*ncx*ncy \
                    for i in range(imin, imax) \
                    for j in range(jmin, jmax) \
                    for k in range(kmin, kmax) \
                    if i+j*ncx+k*ncx*ncy >= cid]
    ##########################################################################
    # DEPRECATED
    ##########################################################################
    def reset_cy_data(self):
        # reset the host-side linked lists to the "empty" state
        for pa in self.arrays:
            head = self.head[pa.name]
            next = self.Next[pa.name]
            head[:] = -1
            next[:] = -1
    def reset_cl_data(self):
        # reset the device-side linked lists to the "empty" state
        for pa in self.arrays:
            dhead = self.dhead[pa.name]
            dnext = self.dnext[pa.name]
            dlocks = self.dlocks[pa.name]
            global_sizes = (int(self.ncells),)
            val = numpy.int32(-1)
            self.prog.reset(self.queue, global_sizes, None, dhead, val).wait()
            val = numpy.int32(0)
            self.prog.reset(self.queue, global_sizes, None, dlocks, val).wait()
            global_sizes = self.global_sizes[pa.name]
            val = numpy.int32(-1)
            self.prog.reset(self.queue, global_sizes, None, dnext, val).wait()
    def reset_data(self):
        """ Initialize the data structures.

        Head is initialized to -1
        Next is initialized to -1
        locks is initialized to 0
        """
        if self.with_cl:
            self.reset_cl_data()
        else:
            self.reset_cy_data()
class RadixSortManager(DomainManager):
    """Spatial indexing scheme based on the radix sort.

    The radix sort can be used to determine neighbor information in
    the following way. Consider the particle distribution in an
    idealized one dimensional cell structure as:
     _____________
    |   |   |   |
    | 2 |0,1| 3 |
    |___|___|___|

    that is, particles with indices 0 and 1 are in cell 1, particle 2
    is in cell 0 and 3 in cell 3.

    We construct two arrays:

    cellids (size=np) : [1,1,0,2] and
    indices (size=np) : [0,1,2,3]

    and sort the indices based on the keys. After the sorting routine,
    the arrays are:

    cellids (size=np) : [0,1,1,2]
    indices (size=np) : [2,0,1,3]

    Now we can compute an array cell_counts (size=ncells+1) from
    the sorted cellids as:

    cellc = [0, 1, 3, 4],

    which can be computed by launching one thread per particle. If the
    sorted cellid to the left is different from this cellid, then this
    particle is at a cell boundary and the index of that particle in
    the sorted cellids is placed in the `cellc` array at that
    location. Of course, there will be as many cell boundaries as
    there are cells. The boundary conditions will have to be handled
    separately.

    Now using this we can determine the particles that belong to a
    particular cell like so:

    particles in cell0 = indices[ cellids[cellc[0]] : cellids[cellc[1]] ]
    """
    def __init__(self, arrays, cell_size=None, context=None,
                 kernel_scale_factor = 2.0, with_cl=True, device='CPU'):
        """ Construct a RadixSort manager.

        Parameters:
        ------------
        arrays -- list
            The ParticleArrays being managed.
        cell_size -- REAL
            The optional bin size to use
        kernel_scale_factor -- REAL
            the scale factor for the radius
        with_cl -- bool
            Explicitly choose OpenCL

        The RadixSort manager constructs and maintains the following
        attributes for each array being indexed:

        (i) cellids (size=np, uint32) : Flattened cell indices for the particles.
        (ii) indices (size=np, uint32) : Particle indices
        (iii) cell_counts(size=ncells+1, uint32) : Cell count array

        The bin size, if provided, is constant in each coordinate
        direction. The default choice for the bin size is twice the
        maximum smoothing length for all particles in the domain.
        """
        DomainManager.__init__(self, arrays, context, with_cl, device)
        # set the kernel scale factor
        self.kernel_scale_factor = kernel_scale_factor
        # set the cell size
        self.const_cell_size = cell_size
        if cell_size is not None:
            self.const_cell_size = get_real(cell_size, self.cl_precision)
        # find global bounds (simulation box and ncells)
        self._find_bounds()
        # The arrays stored for the RadixSortManager
        self.cellids = {}
        self.indices = {}
        self.cell_counts = {}
        # setup the RadixSort objects (the local alias `rsort` is also
        # read by _setup_radix_sort through self.rsort)
        self.rsort = rsort = {}
        self._setup_radix_sort()
        # Corresponding device arrays
        self.dcellids = {}
        self.dindices = {}
        self.dcell_counts = {}
        # dict for kernel launch parameters
        self.global_sizes = {}
        self.local_sizes = {}
        # initialize counter for the iterator
        self._current_cell = 0
        # initialize the host and device buffers
        self._init_buffers()
    #######################################################################
    # public interface
    #######################################################################
    def update(self):
        """ Update the radix-sort data structures """
        # find the bounds for the manager
        self._find_bounds()
        # reset the data structures
        self._init_buffers()
        # update the data structures
        if self.with_cl:
            self._cl_update()
        else:
            self._py_update()
    def enqueue_copy(self):
        """ Copy the Buffer contents to the host.

        The cellids, indices and cell counts buffers are copied.
        """
        if self.with_cl:
            for pa in self.arrays:
                enqueue_copy(self.queue, dst=self.cellids[pa.name],
                             src=self.dcellids[pa.name])
                enqueue_copy(self.queue, dst=self.indices[pa.name],
                             src=self.dindices[pa.name])
                enqueue_copy(queue=self.queue, dst=self.cell_counts[pa.name],
                             src=self.dcell_counts[pa.name])
    ###########################################################################
    # non-public interface
    ###########################################################################
    def _init_buffers(self):
        """Allocate host and device buffers for the RadixSortManager.

        The arrays needed for the manager are:

        (a) cellids of size np which indicates which cell the particle
        belongs to.
        (b) indices of size np which is initially a linear index range
        for the particles. After sorting, this array is used to
        determine particles within a cell.
        (c) cell_counts of size ncells + 1 which is used to determine
        the start and end index for the particles within a cell.
        """
        # at this point the number of cells is known
        ncells = self.ncells
        for i in range(self.narrays):
            pa = self.arrays[i]
            np = pa.get_number_of_particles()
            # cellids and indices are of length np and dtype uint32
            cellids = numpy.ones(np, numpy.uint32)
            indices = numpy.array(range(np), dtype=numpy.uint32)
            # cell_counts is of length ncells + 1
            cellc = numpy.ones(ncells + 1, numpy.uint32)
            # store these in the dictionary for this particle array
            self.cellids[ pa.name ] = cellids
            self.indices[ pa.name ] = indices
            self.cell_counts[ pa.name ] = cellc
        if self.with_cl:
            self._init_device_buffers()
    def _init_device_buffers(self):
        """Initialize the device buffers.

        The arrays initialized here are the cell counts and
        indices. The RadixSort object handles the keys and values.
        """
        narrays = self.narrays
        for i in range(narrays):
            pa = self.arrays[i]
            cellids = self.cellids[pa.name]
            indices = self.indices[pa.name]
            cellc = self.cell_counts[pa.name]
            # Initialize the buffers
            dcellids = cl.Buffer(self.context, mf.READ_WRITE|mf.COPY_HOST_PTR,
                                 hostbuf=cellids)
            dindices = cl.Buffer(self.context, mf.READ_WRITE|mf.COPY_HOST_PTR,
                                 hostbuf=indices)
            dcellc = cl.Buffer(self.context, mf.READ_WRITE|mf.COPY_HOST_PTR,
                               hostbuf=cellc)
            self.dcellids[pa.name] = dcellids
            self.dindices[pa.name] = dindices
            self.dcell_counts[ pa.name ] = dcellc
    def _setup_radix_sort(self):
        """Setup the RadixSort objects to be used.

        Currently, only the AMDRadixSort is available which works on
        both the CPU and the GPU. The NvidiaRadixSort works only on
        Nvidia GPU's.
        """
        # NOTE(review): `narrays` is unused in this method
        narrays = self.narrays
        rsort = self.rsort
        if not self.with_cl:
            for pa in self.arrays:
                rsort[pa.name] = AMDRadixSort()
        else:
            ctx = self.context
            for pa in self.arrays:
                if clu.iscpucontext(ctx):
                    rsort[ pa.name ] = AMDRadixSort()
                elif clu.isgpucontext(ctx):
                    #rsort[ pa.name ] = AMDRadixSort()
                    rsort[ pa.name ] = NvidiaRadixSort()
    def _cl_update(self):
        """Update the data structures.

        The following three steps are performed in order:

        (a) The particles are binned using a standard algorithm like the one
        for linked lists.
        (b) Sort the resulting cellids (keys) and indices (values) using
        the RadixSort objects
        (c) Compute the cell counts by examining the sorted cellids
        """
        # context and queue
        ctx = self.context
        q = self.queue
        # get the cell limits
        ncx, ncy, ncz = self.ncx, self.ncy, self.ncz
        mcx, mcy, mcz = self.mcx, self.mcy, self.mcz
        narrays = self.narrays
        for i in range(narrays):
            pa = self.arrays[i]
            np = pa.get_number_of_particles()
            # get launch parameters for this array
            global_sizes = (np,1,1)
            local_sizes = (1,1,1)
            x = pa.get_cl_buffer("x")
            y = pa.get_cl_buffer("y")
            z = pa.get_cl_buffer("z")
            # bin the particles to get device cellids
            # NOTE(review): `cellc`, `dcellids` and `dindices` below
            # are fetched but not all used on this path; the sort is
            # seeded from the host arrays via rsort.initialize.
            cellids = self.cellids[pa.name]
            indices = self.indices[pa.name]
            cellc = self.cell_counts[pa.name]
            dcellids = self.dcellids[pa.name]
            dindices = self.dindices[pa.name]
            dcell_counts = self.dcell_counts[pa.name]
            self.prog.bin( q, global_sizes, local_sizes,
                           x, y, z, dcellids, self.cell_size,
                           ncx, ncy, ncz, mcx, mcy, mcz ).wait()
            # read the cellids into host array
            clu.enqueue_copy(q, src=dcellids, dst=cellids)
            # initialize the RadixSort with keys and values
            keys = cellids
            values = indices
            rsort = self.rsort[ pa.name ]
            rsort.initialize(keys, values, self.context)
            # sort the keys (cellids) and values (indices)
            rsort.sort()
            # the sorted keys live on the device in the sorter's buffer
            sortedcellids = rsort.dkeys
            self.prog.compute_cell_counts(q, global_sizes, local_sizes,
                                          sortedcellids, dcell_counts,
                                          numpy.uint32(self.ncells),
                                          numpy.uint32(np)).wait()
            # read the result back to host
            # THIS MAY NEED TO BE DONE OR WE COULD SIMPLY LET IT RESIDE
            # ON THE DEVICE.
            clu.enqueue_copy(q, src=dcell_counts, dst=self.cell_counts[pa.name])
    def _py_update(self):
        """Update the data structures using Python"""
        cellsize = self.cell_size
        cellsize1 = 1.0/cellsize
        narrays = self.narrays
        for i in range(narrays):
            pa = self.arrays[i]
            np = pa.get_number_of_particles()
            # bin the particles
            cellids = self.cellids[pa.name]
            x, y, z = pa.get("x", "y", "z")
            for j in range(np):
                _ix = int(numpy.floor( x[j] * cellsize1 ))
                _iy = int(numpy.floor( y[j] * cellsize1 ))
                _iz = int(numpy.floor( z[j] * cellsize1 ))
                # flatten the 3D cell index, offset by the minimum ids
                cellids[j] = numpy.uint32( (_iz - self.mcz)*self.ncx*self.ncy + \
                                           (_iy - self.mcy)*self.ncx + \
                                           (_ix - self.mcx) )
            # sort the cellids and indices
            keys = cellids
            values = self.indices[pa.name]
            rsort = self.rsort[pa.name]
            rsort._sort_cpu(keys, values)
            # compute the cell_count array
            cellc = self.cell_counts[pa.name]
            cellids = keys
            for j in range(np):
                cellid = cellids[j]
                if j == 0:
                    for k in range(cellid + 1):
                        cellc[k] = 0
                elif j == (np - 1):
                    # NOTE(review): when np == 1 this branch never runs
                    # (j == 0 takes precedence), so the tail of cellc is
                    # not filled for a single-particle array -- confirm.
                    for k in range(cellid+1, self.ncells + 1):
                        cellc[k] = np
                    cellidm = cellids[j-1]
                    for k in range(cellid - cellidm):
                        cellc[cellid - k] = j
                else:
                    cellidm = cellids[j-1]
                    for k in range(cellid - cellidm):
                        cellc[cellid - k] = j
    def _setup_program(self):
        """ Read the OpenCL kernel source file and build the program """
        src_file = get_pysph_root() + '/base/radix_sort.cl'
        src = cl_read(src_file, precision=self.cl_precision)
        self.prog = cl.Program(self.context, src).build()
| [
[
1,
0,
0.001,
0.001,
0,
0.66,
0,
954,
0,
1,
0,
0,
954,
0,
0
],
[
1,
0,
0.0034,
0.0019,
0,
0.66,
0.0909,
529,
0,
6,
0,
0,
529,
0,
0
],
[
1,
0,
0.0058,
0.001,
0,
0.6... | [
"import numpy",
"from pysph.solver.cl_utils import cl_read, get_real, HAS_CL, get_pysph_root,\\\n create_some_context, enqueue_copy",
"import pysph.solver.cl_utils as clu",
"if HAS_CL:\n import pyopencl as cl\n mf = cl.mem_flags",
" import pyopencl as cl",
" mf = cl.mem_flags",
"from nnps... |
# OpenCL conditional imports
import pysph.solver.cl_utils as clu
if clu.HAS_CL:
import pyopencl as cl
mf = cl.mem_flags
import numpy as np
class Scan(object):
def __init__(self, GPUContext,
CommandQueue,
numElements):
# Constants
MAX_WORKGROUP_INCLUSIVE_SCAN_SIZE = 1024
MAX_LOCAL_GROUP_SIZE = 256
self.WORKGROUP_SIZE = 256
self.MAX_BATCH_ELEMENTS = 64 * 1048576; #64 * numElements
self.MIN_SHORT_ARRAY_SIZE = 4;
self.MAX_SHORT_ARRAY_SIZE = 4 * self.WORKGROUP_SIZE;
self.MIN_LARGE_ARRAY_SIZE = 8 * self.WORKGROUP_SIZE;
self.MAX_LARGE_ARRAY_SIZE = 4 * self.WORKGROUP_SIZE * self.WORKGROUP_SIZE;
self.size_uint = size_uint = np.uint32(0).nbytes
# OpenCL elements
self.cxGPUContext = GPUContext
self.cqCommandQueue = CommandQueue
self.mNumElements = numElements
mf = cl.mem_flags
if (numElements > MAX_WORKGROUP_INCLUSIVE_SCAN_SIZE):
self.d_Buffer = cl.Buffer(self.cxGPUContext, mf.READ_WRITE, np.int(numElements/MAX_WORKGROUP_INCLUSIVE_SCAN_SIZE * size_uint))
# Program
src_file = clu.get_pysph_root() + '/base/Scan_b.cl'
src = open(src_file).read()
cpProgram = cl.Program(self.cxGPUContext, src).build()
# Kernel
self.ckScanExclusiveLocal1 = cpProgram.scanExclusiveLocal1
self.ckScanExclusiveLocal2 = cpProgram.scanExclusiveLocal2
self.ckUniformUpdate = cpProgram.uniformUpdate
def scanExclusiveLarge(self, d_Dst, d_Src, batchSize, arrayLength):
# I
WORKGROUP_SIZE = self.WORKGROUP_SIZE
size = np.uint32(4 * WORKGROUP_SIZE)
n = (batchSize * arrayLength) / (4 * WORKGROUP_SIZE)
localWorkSize = (np.int(WORKGROUP_SIZE),)
globalWorkSize = (np.int((n * size) / 4), )
# create Local Memory
l_data1 = cl.LocalMemory(np.int(2 * WORKGROUP_SIZE * self.size_uint))
self.ckScanExclusiveLocal1(self.cqCommandQueue, globalWorkSize, localWorkSize,
d_Dst,
d_Src,
l_data1,
size).wait()
# II
size = np.uint32(arrayLength / (4 * WORKGROUP_SIZE))
n = batchSize
elements = np.uint32(n * size)
globalWorkSize = (self.iSnapUp(elements, WORKGROUP_SIZE),)
# create Local Memory
l_data2 = cl.LocalMemory(np.int(2 * WORKGROUP_SIZE * self.size_uint))
self.ckScanExclusiveLocal2(self.cqCommandQueue, globalWorkSize, localWorkSize,
self.d_Buffer,
d_Dst,
d_Src,
l_data2,
elements,
size).wait()
# III
n = (batchSize * arrayLength) / (4 * WORKGROUP_SIZE)
localWorkSize = (np.int(WORKGROUP_SIZE),)
globalWorkSize = (np.int(n * WORKGROUP_SIZE),)
self.ckUniformUpdate(self.cqCommandQueue, globalWorkSize, localWorkSize,
d_Dst,
self.d_Buffer).wait()
def iSnapUp(self, dividend, divisor):
rem = dividend%divisor
if (rem == 0):
return np.int(dividend)
else:
return np.int(dividend - rem + divisor)
| [
[
1,
0,
0.0208,
0.0104,
0,
0.66,
0,
529,
0,
1,
0,
0,
529,
0,
0
],
[
4,
0,
0.0521,
0.0312,
0,
0.66,
0.3333,
0,
7,
0,
0,
0,
0,
0,
0
],
[
1,
1,
0.0521,
0.0104,
1,
0.28... | [
"import pysph.solver.cl_utils as clu",
"if clu.HAS_CL:\n import pyopencl as cl\n mf = cl.mem_flags",
" import pyopencl as cl",
" mf = cl.mem_flags",
"import numpy as np",
"class Scan(object):\n def __init__(self, GPUContext,\n CommandQueue,\n numElements):\n\... |
#! python
'''
Module to collect and generate source files from template files
The template files have very similar syntax to php files.
* All text in input is copied straight to output except that within
`<?py` and `?>` tags.
* Text within `<?py=` and `?>` tags is evaluated and the result is written
into the output file as a string
* Text within `<?py` and `?>` tags is executed with a file-like object `out`
defined which can be written into using `out.write(<string>)`
* Note however that unlike php each code tag cannot extend across different
tags. For example you can ``NOT`` write a loop like:
..
<?py for i in range(5): ?>
In loop with i=<?py= i ?> .
<?py # End of loop ?>
* The imports and globals defined are persisted through all code sections
When used to locate source files as a main program:
The template files must have an extension '.src'.
The generated files have the name same as the src file but with the '.src'
extension removed and the last underscore '_' replaced with a dot '.'
Example: `carray_pyx.src` is generated into `carray.pyx`
'''
import os
import sys
import re
from StringIO import StringIO
def is_modified_later(filename1, filename2):
''' return `True` if the file1 is modified later than file2'''
return os.stat(filename1).st_mtime > os.stat(filename2).st_mtime
class FileGenerator(object):
'''class to generate source file from template'''
py_pattern = re.compile(r'''(?s)\<\?py(?P<code>.*?)\?\>''')
code_pattern = re.compile(r'''(?s)\<\?py(?!=)(?P<code>.*?)\?\>''')
expr_pattern = re.compile(r'''(?s)\<\?py=(?P<expr>.*?)\?\>''')
def generate_file_if_modified(self, infilename, outfilename, check=True):
'''generate source if template is modified later than the outfile
If `check` is True (default) then source is generated only if the
template has been modified later than the source file'''
if is_modified_later(infilename, outfilename):
self.generate_file(infilename, outfilename)
def generate_file(self, infile=sys.stdin, outfile=sys.stdout):
'''method to generate source file from a template file'''
inf = infile
outf = outfile
if isinstance(infile, type('')):
inf = open(infile, 'r')
if isinstance(outfile, type('')):
outf = open(outfile, 'w')
text = inf.read()
outtext = self.generate_output(text)
outf.write(outtext)
if isinstance(infile, type('')):
inf.close()
if isinstance(outfile, type('')):
outf.close()
def generate_output(self, intext):
'''generate output source as a string from given input template'''
self.dict = {}
return re.sub(self.py_pattern, self.sub_func, intext)
def sub_func(self, matchobj):
string = matchobj.group(0)
if string[4] == '=':
return str(self.get_expr_result(string[5:-3].strip()))
else:
return self.get_exec_output(string[4:-3].strip())
def get_exec_output(self, code_str):
'''the the output to a string `out` from execution of a code string'''
out = StringIO()
self.dict['out'] = out
exec code_str in self.dict
ret = out.getvalue()
out.close()
return ret
def get_expr_result(self, expr_str):
#out = StringIO()
#self.dict['out'] = out
ret = eval(expr_str, self.dict)
return ret
def get_src_files(dirname):
'''returns all files in directory having and extension `.src`'''
ls = os.listdir(dirname)
ls = [os.path.join(dirname,f) for f in ls if f.endswith('.src')]
return ls
def generate_files(src_files, if_modified=True):
'''generates source files from the template files with extension `.src`
If `if_modified` is True (default), the source file will be created only
if the template has been modified later than the source
'''
generator = FileGenerator()
for filename in src_files:
outfile = '.'.join(filename[:-4].rsplit('_',1))
if if_modified and not is_modified_later(filename, outfile):
print 'not',
print 'generating file %s from %s' %(outfile, filename)
generator.generate_file_if_modified(filename, outfile, if_modified)
def main(paths=None):
'''generates source files using template files
`args` is a list of `.src` template files to convert
if `args` is `None` all src files in this file's directory are converted
if `args` is an empty list all src files in current directory are converted
'''
if paths is None:
files = get_src_files(os.path.dirname(__file__))
elif len(paths)>0:
files = paths
else:
files = get_src_files(os.path.curdir)
generate_files(files)
if __name__ == '__main__':
import sys
if '--help' in sys.argv or '-h' in sys.argv:
print 'usage:'
print ' generator.py [filenames]'
print
print (' Convert template files with extension `.src` into '
'source files')
print (' If filenames is omitted all `.src` files in current '
'directory will be converted')
else:
main(sys.argv[1:])
| [
[
8,
0,
0.1199,
0.2192,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.2397,
0.0068,
0,
0.66,
0.1,
688,
0,
1,
0,
0,
688,
0,
0
],
[
1,
0,
0.2466,
0.0068,
0,
0.66,
... | [
"'''\nModule to collect and generate source files from template files\n\nThe template files have very similar syntax to php files.\n\n * All text in input is copied straight to output except that within\n`<?py` and `?>` tags.",
"import os",
"import sys",
"import re",
"from StringIO import StringIO",
"def... |
import nnps_util as util
import pysph.solver.cl_utils as clu
import numpy
# PySPH imports
from carray import LongArray
#CHANGE
class OpenCLNeighborLocatorType:
AllPairNeighborLocator = 0
LinkedListSPHNeighborLocator = 1
RadixSortNeighborLocator = 2
class OpenCLNeighborLocator(object):
pass
class LinkedListSPHNeighborLocator(OpenCLNeighborLocator):
def __init__(self, manager, source, dest, scale_fac=2.0, cache=False):
""" Create a neighbor locator between a ParticleArray pair.
A neighbor locator interfaces with a domain manager which
provides an indexing scheme for the particles. The locator
knows how to interpret the information generated after the
domain manager's `update` function has been called.
For the locators based on linked lists as the domain manager,
the head and next arrays are used to determine the neighbors.
Note:
-----
Cython functions to retrieve nearest neighbors given a
destination particle index is only used when OpenCL support is
not available.
When OpenCL is available, the preferred approach is to
generate the neighbor loop code and kernel arguments and
inject this into the CL template files (done by CLCalc)
Parameters:
-----------
manager : DomainManager
The domain manager to use for locating neighbors
source, dest : ParticleArray
pair for which neighbors are sought.
scale_fac : REAL
Radius scale factor for non OpenCL runs.
cache : bool
Flag to indicate if neighbors are to be cached.
"""
self.manager = manager
self.source = source
self.dest = dest
self.scale_fac = scale_fac
self.with_cl = manager.with_cl
self.cache = cache
# Initialize the cache if using with Cython
self.particle_cache = []
if self.cache:
self._initialize_cache()
#######################################################################
# public interface
#######################################################################
def get_nearest_particles(self, i, output_array, exclude_index=-1):
""" Return nearest particles from source array to the dest point.
The search radius is the scale factor times the particle's h
Parameters:
-----------
i : int
The destination index
output_array : (in/out) LongArray
Neighbor indices are stored in this array.
exclude_index : int
Optional index to exclude from the neighbor list
NOTIMPLEMENTED!
"""
if self.cache:
return self.neighbor_cache[i]
else:
self._get_nearest_particles_nocahe(i, output_array)
##########################################################################
# non-public interface
##########################################################################
def _update(self):
""" Update the bin structure and compute cache contents.
Caching is only done if explicitly requested and should be
avoided for large problems to reduce the memory footprint.
"""
# update the domain manager
self.manager.update()
# set the cache if required
if self.cache:
self._initialize_cache()
self._udpdate_cache()
def _get_nearest_particles_nocahe(self, i, output_array, exclude_index=-1):
""" Use the linked list to get nearest neighbors.
The functions defined in `linked_list_functions.pyx` are used to
find the nearest neighbors.
Parameters:
-----------
i : (in) int
The destination particle index
output_array : (in/out) LongArray
Neighbor indices are stored in this array.
exclude_index : int
Optional index to exclude from the neighbor list
NOTIMPLEMENTED!
"""
manager = self.manager
src = self.source
dst = self.dest
# Enqueue a copy if the binning is done with OpenCL
manager.enqueue_copy()
# get the bin structure parameters
ncx = manager.ncx
ncy = manager.ncy
ncells = manager.ncells
# linked list for the source
head = manager.head[src.name]
next = manager.Next[src.name]
# cellid for the destination
cellid = manager.cellids[dst.name][i]
ix = manager.ix[dst.name][i]
iy = manager.iy[dst.name][i]
iz = manager.iz[dst.name][i]
# get all neighbors from the 27 neighboring cells
nbrs = util.ll_get_neighbors(cellid, ix, iy, iz,
ncx, ncy, ncells, head, next)
x = src.x.astype(numpy.float32)
y = src.y.astype(numpy.float32)
z = src.z.astype(numpy.float32)
xi = numpy.float32( dst.x[i] )
yi = numpy.float32( dst.y[i] )
zi = numpy.float32( dst.z[i] )
h = dst.h.astype(numpy.float32)
radius = self.scale_fac * h[i]
# filter the neighbors to within a cutoff radius
nbrs = util.filter_neighbors(xi, yi, zi, radius, x, y, z, nbrs)
output_array.resize( len(nbrs) )
output_array.set_data( nbrs )
def _initialize_cache(self):
""" Iniitialize the particle neighbor cache contents.
The particle cache is one LongArray for each destination particle.
"""
np = self.dest.get_number_of_particles()
self.particle_cache = [ LongArray() for i in range(np) ]
def _udpdate_cache(self):
""" Compute the contents of the cache """
np = self.dest.get_number_of_particles()
for i in range(np):
nbrs = self.particle_cache[i]
self._get_nearest_particles_nocahe(i, nbrs)
def neighbor_loop_code_start(self):
""" Return a string for the start of the neighbor loop code """
return """
// int idx = cix[dest_id];
// int idy = ciy[dest_id];
// int idz = ciz[dest_id];
int idx = cix[particle_id];
int idy = ciy[particle_id];
int idz = ciz[particle_id];
REAL tmp = ncx*ncy;
int src_id, cid;
for (int ix = idx-1; ix <= idx+1; ++ix )
{
for (int iy = idy-1; iy <= idy+1; ++iy)
{
for (int iz = idz-1; iz <= idz+1; ++iz)
{
if ( (ix >=0) && (iy >=0) && (iz >= 0) )
{
cid = (ix) + (iy * ncx) + (iz * tmp);
if ( cid < ncells )
{
src_id = head[ cid ];
while ( src_id != -1 )
"""
def neighbor_loop_code_end(self):
""" Return a string for the start of the neighbor loop code """
return """
} // if cid < ncells
} // if ix >= 0
} // for iz
} // for iy
} // for ix
"""
def neighbor_loop_code_break(self):
return "src_id = next[ src_id ]; "
def get_kernel_args(self):
""" Add the kernel arguments for the OpenCL template """
dst = self.dest
src = self.source
cellids = self.manager.dcellids[dst.name]
cix = self.manager.dix[dst.name]
ciy = self.manager.diy[dst.name]
ciz = self.manager.diz[dst.name]
#CHANGE
head = self.manager.dhead[src.name]
next = self.manager.dnext[src.name]
indices = self.manager.dindices[dst.name]
return {'int const ncx': self.manager.ncx,
'int const ncy': self.manager.ncy,
'int const ncells': self.manager.ncells,
'__global uint* cellids': cellids,
'__global uint* cix': cix,
'__global uint* ciy': ciy,
'__global uint* ciz': ciz,
'__global int* head': head,
'__global int* next': next,
'__global uint* indices': indices
}
class AllPairNeighborLocator(OpenCLNeighborLocator):
def __init__(self, source, dest, scale_fac=2.0, cache=False):
""" Create a neighbor locator between a ParticleArray pair.
A neighbor locator interfaces with a domain manager which
provides an indexing scheme for the particles. The locator
knows how to interpret the information generated after the
domain manager's `update` function has been called.
For the locators based on linked lists as the domain manager,
the head and next arrays are used to determine the neighbors.
Note:
-----
Cython functions to retrieve nearest neighbors given a
destination particle index is only used when OpenCL support is
not available.
When OpenCL is available, the preferred approach is to
generate the neighbor loop code and kernel arguments and
inject this into the CL template files (done by CLCalc)
Parameters:
-----------
source, dest : ParticleArray
pair for which neighbors are sought.
scale_fac : REAL
Radius scale factor for non OpenCL runs.
cache : bool
Flag to indicate if neighbors are to be cached.
"""
self.manager = None
self.source = source
self.dest = dest
self.scale_fac = scale_fac
self.with_cl = True
# Explicitly set the cache to false
self.cache = False
# Initialize the cache if using with Cython
self.particle_cache = []
# set the dirty bit to True
self.is_dirty = True
def neighbor_loop_code_start(self):
""" Return a string for the start of the neighbor loop code """
return "for (int src_id=0; src_id<nbrs; ++src_id)"
def neighbor_loop_code_end(self):
""" Return a string for the start of the neighbor loop code """
return """ """
def neighbor_loop_code_break(self):
return ""
def get_kernel_args(self):
""" Add the kernel arguments for the OpenCL template """
src = self.source
np = numpy.int32(src.get_number_of_particles())
#CHANGE
return {'int const nbrs': np,
'__global uint* indices': indices}
def update(self):
""" Update the bin structure and compute cache contents if
necessary."""
if self.is_dirty:
self.is_dirty = False
def update_status(self):
""" Update the dirty bit for the locator and the DomainManager"""
if not self.is_dirty:
self.is_dirty = self.source.is_dirty or self.dest.is_dirty
##############################################################################
#`RadixSortNeighborLocator` class
##############################################################################
class RadixSortNeighborLocator(OpenCLNeighborLocator):
"""Neighbor locator using the RadixSortManager as domain manager."""
def __init__(self, manager, source, dest, scale_fac=2.0, cache=False):
""" Construct a neighbor locator between a pair of arrays.
Parameters:
-----------
manager : DomainManager
The underlying domain manager used for the indexing scheme for the
particles.
source : ParticleArray
The source particle array from where neighbors are sought.
dest : ParticleArray
The destination particle array for whom neighbors are sought.
scale_fac : float
Maximum kernel scale factor to determine cell size for binning.
cache : bool
Flag to indicate if neighbors are to be cached.
"""
self.manager = manager
self.source = source
self.dest = dest
self.with_cl = manager.with_cl
self.scale_fac = scale_fac
self.cache = cache
# Initialize the cache if using with Cython
self.particle_cache = []
if self.cache:
self._initialize_cache()
#######################################################################
# public interface
#######################################################################
def get_nearest_particles(self, i, output_array, exclude_index=-1):
""" Return nearest particles from source array to the dest point.
The search radius is the scale factor times the particle's h
Parameters:
-----------
i : int
The destination index
output_array : (in/out) LongArray
Neighbor indices are stored in this array.
exclude_index : int
Optional index to exclude from the neighbor list
NOTIMPLEMENTED!
"""
if self.cache:
return self.neighbor_cache[i]
else:
self._get_nearest_particles_nocahe(i, output_array)
##########################################################################
# non-public interface
##########################################################################
def _update(self):
""" Update the bin structure and compute cache contents.
Caching is only done if explicitly requested and should be
avoided for large problems to reduce the memory footprint.
"""
# update the domain manager
self.manager.update()
# set the cache if required
if self.cache:
self._initialize_cache()
self._udpdate_cache()
def _get_nearest_particles_nocahe(self, i, output_array, exclude_index=-1):
""" Use the linked list to get nearest neighbors.
The functions defined in `linked_list_functions.pyx` are used to
find the nearest neighbors.
Parameters:
-----------
i : (in) int
The destination particle index
output_array : (in/out) LongArray
Neighbor indices are stored in this array.
exclude_index : int
Optional index to exclude from the neighbor list
NOTIMPLEMENTED!
"""
manager = self.manager
src = self.source
dst = self.dest
# Enqueue a copy if the binning is done with OpenCL
manager.enqueue_copy()
# get the bin structure parameters
ncx = manager.ncx
ncy = manager.ncy
ncells = manager.ncells
#CHANGE
# cell_counts and indices for the source
cellc = manager.cell_counts[ src.name ]
s_indices = manager.indices[ src.name ]
# destination indices
d_indices = manager.indices[ dst.name ]
# cellid for the destination particle
cellid = manager.cellids[dst.name][i]
# get all neighbors from the 27 neighboring cells
nbrs = util.rs_get_neighbors(cellid, ncx, ncy, ncells, cellc, s_indices)
xs = src.x.astype(numpy.float32)
ys = src.y.astype(numpy.float32)
zs = src.z.astype(numpy.float32)
xi = numpy.float32( dst.x[d_indices[i]] )
yi = numpy.float32( dst.y[d_indices[i]] )
zi = numpy.float32( dst.z[d_indices[i]] )
radius = numpy.float32( self.scale_fac * dst.h[d_indices[i]] )
# filter the neighbors to within a cutoff radius
nbrs = util.filter_neighbors(xi, yi, zi, radius, xs, ys, zs, nbrs)
output_array.resize( len(nbrs) )
output_array.set_data( nbrs )
def neighbor_loop_code_start(self):
return """// unflatten cellid
int idx, idy, idz;
int s_cid, src_id;
int start_id, end_id;
int d_cid = cellids[ dest_id ];
idz = convert_int_rtn( d_cid/(ncx*ncy) );
d_cid = d_cid - (idz * ncx*ncy);
idy = convert_int_rtn( d_cid/ncx );
idx = d_cid - (idy * ncx);
for (int ix = idx-1; ix <= idx+1; ix++)
{
for (int iy = idy-1; iy <= idy+1; iy++)
{
for (int iz = idz-1; iz <= idz+1; iz++)
{
if ( (ix >=0) && (iy >=0) && (iz >= 0) )
{
s_cid = (ix) + (iy * ncx) + (iz * ncx*ncy);
if ( s_cid < ncells )
{
start_id = cell_counts[ s_cid ];
end_id = cell_counts[ s_cid + 1 ];
for (int i=start_id; i<end_id; ++i)
{
src_id = src_indices[ i ];
"""
def neighbor_loop_code_end(self):
""" Return a string for the start of the neighbor loop code """
return """
} // for (start,end)
} // if cid < ncells
} // if ix >= 0
} // for iz
} // for iy
} // for ix
"""
def neighbor_loop_code_break(self):
return ""
def get_kernel_args(self):
""" Add the kernel arguments for the OpenCL template """
dst = self.dest
src = self.source
#CHANGE
# copying the buffers created in sort no dm!
cellids = self.manager.rsort[dst.name].dkeys
dst_indices = self.manager.rsort[dst.name].dvalues
src_indices = self.manager.rsort[src.name].dvalues
cell_counts = self.manager.dcell_counts[src.name]
return {'int const ncx': self.manager.ncx,
'int const ncy': self.manager.ncy,
'int const ncells': self.manager.ncells,
'__global uint* cellids': cellids,
'__global uint* cell_counts': cell_counts,
'__global uint* src_indices': src_indices,
'__global uint* indices': dst_indices
}
# import nnps_util as util
# import numpy
# # PySPH imports
# from carray import LongArray
# class OpenCLNeighborLocatorType:
# AllPairNeighborLocator = 0
# LinkedListSPHNeighborLocator = 1
# RadixSortNeighborLocator = 2
# class OpenCLNeighborLocator(object):
# pass
# class LinkedListSPHNeighborLocator(OpenCLNeighborLocator):
# def __init__(self, manager, source, dest, scale_fac=2.0, cache=False):
# """ Create a neighbor locator between a ParticleArray pair.
# A neighbor locator interfaces with a domain manager which
# provides an indexing scheme for the particles. The locator
# knows how to interpret the information generated after the
# domain manager's `update` function has been called.
# For the locators based on linked lists as the domain manager,
# the head and next arrays are used to determine the neighbors.
# Note:
# -----
# Cython functions to retrieve nearest neighbors given a
# destination particle index is only used when OpenCL support is
# not available.
# When OpenCL is available, the preferred approach is to
# generate the neighbor loop code and kernel arguments and
# inject this into the CL template files (done by CLCalc)
# Parameters:
# -----------
# manager : DomainManager
# The domain manager to use for locating neighbors
# source, dest : ParticleArray
# pair for which neighbors are sought.
# scale_fac : REAL
# Radius scale factor for non OpenCL runs.
# cache : bool
# Flag to indicate if neighbors are to be cached.
# """
# self.manager = manager
# self.source = source
# self.dest = dest
# self.scale_fac = scale_fac
# self.with_cl = manager.with_cl
# self.cache = cache
# # Initialize the cache if using with Cython
# self.particle_cache = []
# if self.cache:
# self._initialize_cache()
# #######################################################################
# # public interface
# #######################################################################
# def get_nearest_particles(self, i, output_array, exclude_index=-1):
# """ Return nearest particles from source array to the dest point.
# The search radius is the scale factor times the particle's h
# Parameters:
# -----------
# i : int
# The destination index
# output_array : (in/out) LongArray
# Neighbor indices are stored in this array.
# exclude_index : int
# Optional index to exclude from the neighbor list
# NOTIMPLEMENTED!
# """
# if self.cache:
# return self.neighbor_cache[i]
# else:
# self._get_nearest_particles_nocahe(i, output_array)
# ##########################################################################
# # non-public interface
# ##########################################################################
# def _update(self):
# """ Update the bin structure and compute cache contents.
# Caching is only done if explicitly requested and should be
# avoided for large problems to reduce the memory footprint.
# """
# # update the domain manager
# self.manager.update()
# # set the cache if required
# if self.cache:
# self._initialize_cache()
# self._udpdate_cache()
# def _get_nearest_particles_nocahe(self, i, output_array, exclude_index=-1):
# """ Use the linked list to get nearest neighbors.
# The functions defined in `linked_list_functions.pyx` are used to
# find the nearest neighbors.
# Parameters:
# -----------
# i : (in) int
# The destination particle index
# output_array : (in/out) LongArray
# Neighbor indices are stored in this array.
# exclude_index : int
# Optional index to exclude from the neighbor list
# NOTIMPLEMENTED!
# """
# manager = self.manager
# src = self.source
# dst = self.dest
# # Enqueue a copy if the binning is done with OpenCL
# manager.enqueue_copy()
# # get the bin structure parameters
# ncx = manager.ncx
# ncy = manager.ncy
# ncells = manager.ncells
# # linked list for the source
# head = manager.head[src.name]
# next = manager.Next[src.name]
# # cellid for the destination
# cellid = manager.cellids[dst.name][i]
# ix = manager.ix[dst.name][i]
# iy = manager.iy[dst.name][i]
# iz = manager.iz[dst.name][i]
# # get all neighbors from the 27 neighboring cells
# nbrs = util.ll_get_neighbors(cellid, ix, iy, iz,
# ncx, ncy, ncells, head, next)
# x = src.x.astype(numpy.float32)
# y = src.y.astype(numpy.float32)
# z = src.z.astype(numpy.float32)
# xi = numpy.float32( dst.x[i] )
# yi = numpy.float32( dst.y[i] )
# zi = numpy.float32( dst.z[i] )
# h = dst.h.astype(numpy.float32)
# radius = self.scale_fac * h[i]
# # filter the neighbors to within a cutoff radius
# nbrs = util.filter_neighbors(xi, yi, zi, radius, x, y, z, nbrs)
# output_array.resize( len(nbrs) )
# output_array.set_data( nbrs )
# def _initialize_cache(self):
# """ Iniitialize the particle neighbor cache contents.
# The particle cache is one LongArray for each destination particle.
# """
# np = self.dest.get_number_of_particles()
# self.particle_cache = [ LongArray() for i in range(np) ]
# def _udpdate_cache(self):
# """ Compute the contents of the cache """
# np = self.dest.get_number_of_particles()
# for i in range(np):
# nbrs = self.particle_cache[i]
# self._get_nearest_particles_nocahe(i, nbrs)
# def neighbor_loop_code_start(self):
# """ Return a string for the start of the neighbor loop code """
# return """
# int idx = cix[particle_id];
# int idy = ciy[particle_id];
# int idz = ciz[particle_id];
# REAL tmp = ncx*ncy;
# int src_id, cid;
# for (int ix = idx-1; ix <= idx+1; ++ix )
# {
# for (int iy = idy-1; iy <= idy+1; ++iy)
# {
# for (int iz = idz-1; iz <= idz+1; ++iz)
# {
# if ( (ix >=0) && (iy >=0) && (iz >= 0) )
# {
# cid = (ix) + (iy * ncx) + (iz * tmp);
# if ( cid < ncells )
# {
# src_id = head[ cid ];
# while ( src_id != -1 )
# """
# def neighbor_loop_code_end(self):
# """ Return a string for the start of the neighbor loop code """
# return """
# } // if cid < ncells
# } // if ix >= 0
# } // for iz
# } // for iy
# } // for ix
# """
# def neighbor_loop_code_break(self):
# return "src_id = next[ src_id ]; "
# def get_kernel_args(self):
# """ Add the kernel arguments for the OpenCL template """
# dst = self.dest
# src = self.source
# cellids = self.manager.dcellids[dst.name]
# cix = self.manager.dix[dst.name]
# ciy = self.manager.diy[dst.name]
# ciz = self.manager.diz[dst.name]
# head = self.manager.dhead[src.name]
# next = self.manager.dnext[src.name]
# indices = self.manager.dindices[dst.name]
# return {'int const ncx': self.manager.ncx,
# 'int const ncy': self.manager.ncy,
# 'int const ncells': self.manager.ncells,
# '__global uint* cellids': cellids,
# '__global uint* cix': cix,
# '__global uint* ciy': ciy,
# '__global uint* ciz': ciz,
# '__global int* head': head,
# '__global int* next': next,
# '__global uint* indices': indices
# }
# class AllPairNeighborLocator(OpenCLNeighborLocator):
# def __init__(self, source, dest, scale_fac=2.0, cache=False):
# """ Create a neighbor locator between a ParticleArray pair.
# A neighbor locator interfaces with a domain manager which
# provides an indexing scheme for the particles. The locator
# knows how to interpret the information generated after the
# domain manager's `update` function has been called.
# For the locators based on linked lists as the domain manager,
# the head and next arrays are used to determine the neighbors.
# Note:
# -----
# Cython functions to retrieve nearest neighbors given a
# destination particle index is only used when OpenCL support is
# not available.
# When OpenCL is available, the preferred approach is to
# generate the neighbor loop code and kernel arguments and
# inject this into the CL template files (done by CLCalc)
# Parameters:
# -----------
# source, dest : ParticleArray
# pair for which neighbors are sought.
# scale_fac : REAL
# Radius scale factor for non OpenCL runs.
# cache : bool
# Flag to indicate if neighbors are to be cached.
# """
# self.manager = None
# self.source = source
# self.dest = dest
# self.scale_fac = scale_fac
# self.with_cl = True
# # Explicitly set the cache to false
# self.cache = False
# # Initialize the cache if using with Cython
# self.particle_cache = []
# # set the dirty bit to True
# self.is_dirty = True
# def neighbor_loop_code_start(self):
# """ Return a string for the start of the neighbor loop code """
# return "for (int src_id=0; src_id<nbrs; ++src_id)"
# def neighbor_loop_code_end(self):
# """ Return a string for the start of the neighbor loop code """
# return """ """
# def neighbor_loop_code_break(self):
# return ""
# def get_kernel_args(self):
# """ Add the kernel arguments for the OpenCL template """
# src = self.source
# np = numpy.int32(src.get_number_of_particles())
# return {'int const nbrs': np,
# '__global uint* indices': indices
# }
# def update(self):
# """ Update the bin structure and compute cache contents if
# necessary."""
# if self.is_dirty:
# self.is_dirty = False
# def update_status(self):
# """ Update the dirty bit for the locator and the DomainManager"""
# if not self.is_dirty:
# self.is_dirty = self.source.is_dirty or self.dest.is_dirty
# ##############################################################################
# #`RadixSortNeighborLocator` class
# ##############################################################################
# class RadixSortNeighborLocator(OpenCLNeighborLocator):
# """Neighbor locator using the RadixSortManager as domain manager."""
# def __init__(self, manager, source, dest, scale_fac=2.0, cache=False):
# """ Construct a neighbor locator between a pair of arrays.
# Parameters:
# -----------
# manager : DomainManager
# The underlying domain manager used for the indexing scheme for the
# particles.
# source : ParticleArray
# The source particle array from where neighbors are sought.
# dest : ParticleArray
# The destination particle array for whom neighbors are sought.
# scale_fac : float
# Maximum kernel scale factor to determine cell size for binning.
# cache : bool
# Flag to indicate if neighbors are to be cached.
# """
# self.manager = manager
# self.source = source
# self.dest = dest
# self.with_cl = manager.with_cl
# self.scale_fac = scale_fac
# self.cache = cache
# # Initialize the cache if using with Cython
# self.particle_cache = []
# if self.cache:
# self._initialize_cache()
# #######################################################################
# # public interface
# #######################################################################
# def get_nearest_particles(self, i, output_array, exclude_index=-1):
# """ Return nearest particles from source array to the dest point.
# The search radius is the scale factor times the particle's h
# Parameters:
# -----------
# i : int
# The destination index
# output_array : (in/out) LongArray
# Neighbor indices are stored in this array.
# exclude_index : int
# Optional index to exclude from the neighbor list
# NOTIMPLEMENTED!
# """
# if self.cache:
# return self.neighbor_cache[i]
# else:
# self._get_nearest_particles_nocahe(i, output_array)
# ##########################################################################
# # non-public interface
# ##########################################################################
# def _update(self):
# """ Update the bin structure and compute cache contents.
# Caching is only done if explicitly requested and should be
# avoided for large problems to reduce the memory footprint.
# """
# # update the domain manager
# self.manager.update()
# # set the cache if required
# if self.cache:
# self._initialize_cache()
# self._udpdate_cache()
# def _get_nearest_particles_nocahe(self, i, output_array, exclude_index=-1):
# """ Use the linked list to get nearest neighbors.
# The functions defined in `linked_list_functions.pyx` are used to
# find the nearest neighbors.
# Parameters:
# -----------
# i : (in) int
# The destination particle index
# output_array : (in/out) LongArray
# Neighbor indices are stored in this array.
# exclude_index : int
# Optional index to exclude from the neighbor list
# NOTIMPLEMENTED!
# """
# manager = self.manager
# src = self.source
# dst = self.dest
# # Enqueue a copy if the binning is done with OpenCL
# manager.enqueue_copy()
# # get the bin structure parameters
# ncx = manager.ncx
# ncy = manager.ncy
# ncells = manager.ncells
# # cell_counts and indices for the source
# cellc = manager.cell_counts[ src.name ]
# s_indices = manager.indices[ src.name ]
# # destination indices
# d_indices = manager.indices[ dst.name ]
# # cellid for the destination particle
# cellid = manager.cellids[dst.name][i]
# # get all neighbors from the 27 neighboring cells
# nbrs = util.rs_get_neighbors(cellid, ncx, ncy, ncells, cellc, s_indices)
# xs = src.x.astype(numpy.float32)
# ys = src.y.astype(numpy.float32)
# zs = src.z.astype(numpy.float32)
# xi = numpy.float32( dst.x[d_indices[i]] )
# yi = numpy.float32( dst.y[d_indices[i]] )
# zi = numpy.float32( dst.z[d_indices[i]] )
# radius = numpy.float32( self.scale_fac * dst.h[d_indices[i]] )
# # filter the neighbors to within a cutoff radius
# nbrs = util.filter_neighbors(xi, yi, zi, radius, xs, ys, zs, nbrs)
# output_array.resize( len(nbrs) )
# output_array.set_data( nbrs )
# def neighbor_loop_code_start(self):
# return """// unflatten cellid
# int idx, idy, idz;
# int s_cid, src_id;
# int start_id, end_id;
# int d_cid = cellids[ dest_id ];
# idz = convert_int_rtn( d_cid/(ncx*ncy) );
# d_cid = d_cid - (idz * ncx*ncy);
# idy = convert_int_rtn( d_cid/ncx );
# idx = d_cid - (idy * ncx);
# for (int ix = idx-1; ix <= idx+1; ix++)
# {
# for (int iy = idy-1; iy <= idy+1; iy++)
# {
# for (int iz = idz-1; iz <= idz+1; iz++)
# {
# if ( (ix >=0) && (iy >=0) && (iz >= 0) )
# {
# s_cid = (ix) + (iy * ncx) + (iz * ncx*ncy);
# if ( s_cid < ncells )
# {
# start_id = cell_counts[ s_cid ];
# end_id = cell_counts[ s_cid + 1 ];
# for (int i=start_id; i<end_id; ++i)
# {
# src_id = src_indices[ i ];
# """
# def neighbor_loop_code_end(self):
# """ Return a string for the start of the neighbor loop code """
# return """
# } // for (start,end)
# } // if cid < ncells
# } // if ix >= 0
# } // for iz
# } // for iy
# } // for ix
# """
# def neighbor_loop_code_break(self):
# return ""
# def get_kernel_args(self):
# """ Add the kernel arguments for the OpenCL template """
# dst = self.dest
# src = self.source
# # copying the buffers created in sort no dm!
# cellids = self.manager.rsort[dst.name].dkeys
# dst_indices = self.manager.rsort[dst.name].dvalues
# src_indices = self.manager.rsort[src.name].dvalues
# cell_counts = self.manager.dcell_counts[src.name]
# return {'int const ncx': self.manager.ncx,
# 'int const ncy': self.manager.ncy,
# 'int const ncells': self.manager.ncells,
# '__global uint* cellids': cellids,
# '__global uint* cell_counts': cell_counts,
# '__global uint* src_indices': src_indices,
# '__global uint* indices': dst_indices
# }
| [
[
1,
0,
0.0008,
0.0008,
0,
0.66,
0,
501,
0,
1,
0,
0,
501,
0,
0
],
[
1,
0,
0.0017,
0.0008,
0,
0.66,
0.125,
529,
0,
1,
0,
0,
529,
0,
0
],
[
1,
0,
0.0034,
0.0008,
0,
0... | [
"import nnps_util as util",
"import pysph.solver.cl_utils as clu",
"import numpy",
"from carray import LongArray",
"class OpenCLNeighborLocatorType:\n AllPairNeighborLocator = 0\n LinkedListSPHNeighborLocator = 1\n RadixSortNeighborLocator = 2",
" AllPairNeighborLocator = 0",
" LinkedList... |
# standard imports
import numpy
# local imports
from pysph.base.particle_array import *
def generate_sample_dataset_1():
    """Build the two-array test dataset used by the cell tests.

    The particle layout is shown in test_cell_case1.png.
    """
    xs = numpy.array([0.25, 0.8, 0.5, 0.8, 0.2, 0.5, 1.5, 1.5])
    ys = numpy.array([0.25, 0.1, 0.5, 0.8, 0.9, 1.5, 0.5, 1.5])
    zs = numpy.zeros(8)
    hs = numpy.ones(8)
    first = ParticleArray(name='parr1',
                          x={'data': xs}, y={'data': ys},
                          z={'data': zs}, h={'data': hs})

    xs = numpy.array([0.2, 1.2, 1.5, 0.4])
    ys = numpy.zeros(4)
    zs = numpy.array([1.6, 1.5, -0.5, 0.4])
    hs = numpy.ones(4)
    second = ParticleArray(name='parr2',
                           x={'data': xs}, y={'data': ys},
                           z={'data': zs}, h={'data': hs})
    return [first, second]
def generate_sample_dataset_2():
    """Build the single-array test dataset used by the cell tests.

    The particle layout is shown in test_cell_data2.png.
    """
    xs = numpy.array([-0.5, -0.5, 0.5, 0.5, 1.5, 2.5, 2.5])
    ys = numpy.array([2.5, -0.5, 1.5, 0.5, 0.5, 0.5, -0.5])
    zs = numpy.zeros(7)
    hs = numpy.ones(7)
    only = ParticleArray(name='parr1',
                         x={'data': xs}, y={'data': ys},
                         z={'data': zs}, h={'data': hs})
    return [only]
| [
[
1,
0,
0.0455,
0.0227,
0,
0.66,
0,
954,
0,
1,
0,
0,
954,
0,
0
],
[
1,
0,
0.1136,
0.0227,
0,
0.66,
0.3333,
505,
0,
1,
0,
0,
505,
0,
0
],
[
2,
0,
0.4091,
0.4773,
0,
... | [
"import numpy",
"from pysph.base.particle_array import *",
"def generate_sample_dataset_1():\n \"\"\"\n Generate test test data.\n\n Look at image test_cell_case1.png for details.\n \"\"\"\n x = numpy.array([0.25, 0.8, 0.5, 0.8, 0.2, 0.5, 1.5, 1.5])\n y = numpy.array([0.25, 0.1, 0.5, 0.8, 0.9,... |
"""
Tests for the particle array module.
"""
# standard imports
import unittest
import numpy
# local imports
import pysph
from pysph.base import particle_array
from pysph.base.carray import LongArray, IntArray, DoubleArray
from pysph.base import carray
import pickle
def check_array(x, y):
    """Return True when ``x`` and ``y`` agree element-wise to an
    absolute tolerance of 1e-16 (no relative tolerance)."""
    a = numpy.asarray(x)
    b = numpy.asarray(y)
    return numpy.allclose(a, b, rtol=0, atol=1e-16)
###############################################################################
# `ParticleArrayTest` class.
###############################################################################
class ParticleArrayTest(unittest.TestCase):
    """
    Tests for the particle array class.
    """
    # NOTE(review): this suite is Python 2 code -- it relies on
    # dict.has_key() and print statements, both removed in Python 3.
    def test_constructor(self):
        """
        Test the constructor.
        """
        # Default constructor test.
        p = particle_array.ParticleArray(name='test_particle_array')
        self.assertEqual(p.name, 'test_particle_array')
        self.assertEqual(p.temporary_arrays == {}, True)
        self.assertEqual(p.is_dirty, True)
        self.assertEqual(p.properties.has_key('tag'), True)
        self.assertEqual(p.properties['tag'].length, 0)
        # Constructor with some properties.
        x = [1, 2, 3, 4.]
        y = [0., 1., 2., 3.]
        z = [0., 0., 0., 0.]
        m = [1., 1., 1., 1.]
        h = [.1, .1, .1, .1]
        p = particle_array.ParticleArray(x={'data':x},
                                         y={'data':y},
                                         z={'data':z},
                                         m={'data':m},
                                         h={'data':h})
        self.assertEqual(p.name, '')
        self.assertEqual(p.properties.has_key('x'), True)
        self.assertEqual(p.properties.has_key('y'), True)
        self.assertEqual(p.properties.has_key('z'), True)
        self.assertEqual(p.properties.has_key('m'), True)
        self.assertEqual(p.properties.has_key('h'), True)
        # get the properties are check if they are the same
        xarr = p.properties['x'].get_npy_array()
        self.assertEqual(check_array(xarr, x), True)
        yarr = p.properties['y'].get_npy_array()
        self.assertEqual(check_array(yarr, y), True)
        zarr = p.properties['z'].get_npy_array()
        self.assertEqual(check_array(zarr, z), True)
        marr = p.properties['m'].get_npy_array()
        self.assertEqual(check_array(marr, m), True)
        harr = p.properties['h'].get_npy_array()
        self.assertEqual(check_array(harr, h), True)
        # check if the 'tag' array was added.
        self.assertEqual(p.properties.has_key('tag'), True)
        self.assertEqual(p.properties.values()[0].length == len(x), True)
        # Constructor with tags
        tags = [0, 1, 0, 1]
        p = particle_array.ParticleArray(x={'data':x}, y={'data':y},
                                         z={'data':z},
                                         tag={'data':tags,'type':'long'})
        # Real particles (tag 0) are moved to the front of the arrays on
        # construction; the expected values below reflect that re-ordering.
        self.assertEqual(check_array(p.get('tag', only_real_particles=False),
                                     [0,0,1,1]), True)
        self.assertEqual(check_array(p.get('x', only_real_particles=False),
                                     [1,3,2,4]), True)
        self.assertEqual(check_array(p.get('y', only_real_particles=False),
                                     [0,2,1,3]), True)
        self.assertEqual(check_array(p.get('z', only_real_particles=False),
                                     [0,0,0,0]), True)
        # trying to create particle array without any values but some
        # properties.
        p = particle_array.ParticleArray(x={}, y={}, z={}, h={})
        self.assertEqual(p.get_number_of_particles(), 0)
        self.assertEqual(p.properties.has_key('x'), True)
        self.assertEqual(p.properties.has_key('y'), True)
        self.assertEqual(p.properties.has_key('z'), True)
        self.assertEqual(p.properties.has_key('tag'), True)
        # now trying to supply some properties with values and others without
        p = particle_array.ParticleArray(x={'default':10.0}, y={'data':[1.0, 2.0]},
                                         z={}, h={'data':[0.1, 0.1]})
        self.assertEqual(p.get_number_of_particles(), 2)
        self.assertEqual(check_array(p.x, [10., 10.]), True)
        self.assertEqual(check_array(p.y, [1., 2.]), True)
        self.assertEqual(check_array(p.z, [0, 0]), True)
        self.assertEqual(check_array(p.h, [0.1, 0.1]), True)
    def test_get_number_of_particles(self):
        """
        Tests the get_number_of_particles of particles.
        """
        x = [1, 2, 3, 4.]
        y = [0., 1., 2., 3.]
        z = [0., 0., 0., 0.]
        m = [1., 1., 1., 1.]
        h = [.1, .1, .1, .1]
        p = particle_array.ParticleArray(x={'data':x}, y={'data':y},
                                         z={'data':z}, m={'data':m}, h={'data':h})
        self.assertEqual(p.get_number_of_particles(), 4)
    def test_get(self):
        """
        Tests the get function.
        """
        x = [1, 2, 3, 4.]
        y = [0., 1., 2., 3.]
        z = [0., 0., 0., 0.]
        m = [1., 1., 1., 1.]
        h = [.1, .1, .1, .1]
        p = particle_array.ParticleArray(x={'data':x}, y={'data':y},
                                         z={'data':z}, m={'data':m}, h={'data':h})
        self.assertEqual(check_array(x, p.get('x')), True)
        self.assertEqual(check_array(y, p.get('y')), True)
        self.assertEqual(check_array(z, p.get('z')), True)
        self.assertEqual(check_array(m, p.get('m')), True)
        self.assertEqual(check_array(h, p.get('h')), True)
    def test_set(self):
        """
        Tests the set function.
        """
        x = [1, 2, 3, 4.]
        y = [0., 1., 2., 3.]
        z = [0., 0., 0., 0.]
        m = [1., 1., 1., 1.]
        h = [.1, .1, .1, .1]
        p = particle_array.ParticleArray(x={'data':x}, y={'data':y},
                                         z={'data':z}, m={'data':m},
                                         h={'data':h})
        # set the x array with new values
        p.set(**{'x':[4., 3, 2, 1], 'h':[0.2, 0.2, 0.2, 0.2]})
        self.assertEqual(check_array(p.get('x'), [4., 3, 2, 1]), True)
        self.assertEqual(check_array(p.get('h'), [0.2, 0.2, 0.2, 0.2]), True)
        # trying to set the tags
        p.set(**{'tag':[0, 1, 1, 1]})
        self.assertEqual(check_array(p.get('tag', only_real_particles=False)
                                     , [0, 1, 1, 1]), True)
        self.assertEqual(check_array(p.get('tag'), [0]), True)
        # try setting array with smaller length array.
        p.set(**{'x':[5, 6, 7]})
        self.assertEqual(check_array(p.get('x', only_real_particles=False),
                                     [5, 6, 7, 1]), True)
        # try setting array with longer array.
        self.assertRaises(ValueError, p.set, **{'x':[1., 2, 3, 5, 6]})
    def test_add_temporary_array(self):
        """
        Tests the add_temporary_array function.
        """
        x = [1, 2, 3, 4.]
        y = [0., 1., 2., 3.]
        z = [0., 0., 0., 0.]
        m = [1., 1., 1., 1.]
        h = [.1, .1, .1, .1]
        p = particle_array.ParticleArray(x={'data':x}, y={'data':y}, z={'data':z}, m={'data':m}, h={'data':h})
        # make sure the temporary_arrays dict is empty.
        self.assertEqual(p.temporary_arrays, {})
        # now add some temporary arrays.
        p.add_temporary_array('temp1')
        p.add_temporary_array('temp2')
        # get the arrays and make sure they are of correct size.
        self.assertEqual(p.get('temp1').size == 4, True)
        self.assertEqual(p.get('temp2').size == 4, True)
        # try to add temporary array with name as some property.
        self.assertRaises(ValueError, p.add_temporary_array, 'x')
        # try setting a temporary array.
        p.set(**{'temp1':[2, 4, 3, 1]})
        self.assertEqual(check_array(p.get('temp1'), [2, 4, 3, 1]), True)
    def test_clear(self):
        """
        Tests the clear function.
        """
        x = [1, 2, 3, 4.]
        y = [0., 1., 2., 3.]
        z = [0., 0., 0., 0.]
        m = [1., 1., 1., 1.]
        h = [.1, .1, .1, .1]
        p = particle_array.ParticleArray(x={'data':x}, y={'data':y},
                                         z={'data':z}, m={'data':m},
                                         h={'data':h})
        p.clear()
        self.assertEqual(len(p.properties), 4)
        self.assertEqual(p.properties.has_key('tag'), True)
        self.assertEqual(p.properties['tag'].length, 0)
        self.assertEqual(p.properties.has_key('group'), True)
        self.assertEqual(p.properties['group'].length, 0)
        self.assertEqual(p.properties.has_key('local'), True)
        self.assertEqual(p.properties['local'].length, 0)
        self.assertEqual(p.properties.has_key('pid'), True)
        self.assertEqual(p.properties['pid'].length, 0)
        self.assertEqual(p.is_dirty, True)
        self.assertEqual(p.temporary_arrays, {})
    def test_getattr(self):
        """
        Tests the __getattr__ function.
        """
        x = [1, 2, 3, 4.]
        y = [0., 1., 2., 3.]
        z = [0., 0., 0., 0.]
        m = [1., 1., 1., 1.]
        h = [.1, .1, .1, .1]
        p = particle_array.ParticleArray(x={'data':x}, y={'data':y},
                                         z={'data':z}, m={'data':m},
                                         h={'data':h})
        self.assertEqual(check_array(x, p.x), True)
        self.assertEqual(check_array(y, p.y), True)
        self.assertEqual(check_array(z, p.z), True)
        self.assertEqual(check_array(m, p.m), True)
        self.assertEqual(check_array(h, p.h), True)
        # try getting an non-existant attribute
        self.assertRaises(AttributeError, p.__getattr__, 'a')
    def test_setattr(self):
        """
        Tests the __setattr__ function.
        """
        x = [1, 2, 3, 4.]
        y = [0., 1., 2., 3.]
        z = [0., 0., 0., 0.]
        m = [1., 1., 1., 1.]
        h = [.1, .1, .1, .1]
        p = particle_array.ParticleArray(x={'data':x}, y={'data':y},
                                         z={'data':z}, m={'data':m},
                                         h={'data':h})
        p.x = p.x*2.0
        self.assertEqual(check_array(p.get('x'), [2., 4, 6, 8]), True)
        p.x = p.x + 3.0*p.x
        self.assertEqual(check_array(p.get('x'), [8., 16., 24., 32.]), True)
    def test_remove_particles(self):
        """
        Tests the remove_particles function.
        """
        x = [1, 2, 3, 4.]
        y = [0., 1., 2., 3.]
        z = [0., 0., 0., 0.]
        m = [1., 1., 1., 1.]
        h = [.1, .1, .1, .1]
        p = particle_array.ParticleArray(x={'data':x}, y={'data':y},
                                         z={'data':z}, m={'data':m},
                                         h={'data':h})
        p.add_temporary_array('tmp1')
        remove_arr = LongArray(0)
        remove_arr.append(0)
        remove_arr.append(1)
        p.remove_particles(remove_arr)
        self.assertEqual(p.get_number_of_particles(), 2)
        self.assertEqual(check_array(p.x, [3., 4.]), True)
        self.assertEqual(check_array(p.y, [2., 3.]), True)
        self.assertEqual(check_array(p.z, [0., 0.]), True)
        self.assertEqual(check_array(p.m, [1., 1.]), True)
        self.assertEqual(check_array(p.h, [.1, .1]), True)
        self.assertEqual(len(p.tmp1), 2)
        # now try invalid operatios to make sure errors are raised.
        remove_arr.resize(10)
        self.assertRaises(ValueError, p.remove_particles, remove_arr)
        # now try to remove a particle with index more that particle
        # length.
        remove_arr.resize(1)
        remove_arr[0] = 2
        p.remove_particles(remove_arr)
        # make sure no change occurred.
        self.assertEqual(p.get_number_of_particles(), 2)
        self.assertEqual(check_array(p.x, [3., 4.]), True)
        self.assertEqual(check_array(p.y, [2., 3.]), True)
        self.assertEqual(check_array(p.z, [0., 0.]), True)
        self.assertEqual(check_array(p.m, [1., 1.]), True)
        self.assertEqual(check_array(p.h, [.1, .1]), True)
        self.assertEqual(len(p.tmp1), 2)
    def test_add_particles(self):
        """
        Tests the add_particles function.
        """
        x = [1, 2, 3, 4.]
        y = [0., 1., 2., 3.]
        z = [0., 0., 0., 0.]
        m = [1., 1., 1., 1.]
        h = [.1, .1, .1, .1]
        p = particle_array.ParticleArray(x={'data':x}, y={'data':y},
                                         z={'data':z}, m={'data':m},
                                         h={'data':h})
        p.add_temporary_array('tmp1')
        p.set_dirty(False)
        new_particles = {}
        new_particles['x'] = numpy.array([5., 6, 7])
        new_particles['y'] = numpy.array([4., 5, 6])
        new_particles['z'] = numpy.array([0., 0, 0])
        p.add_particles(**new_particles)
        self.assertEqual(p.get_number_of_particles(), 7)
        self.assertEqual(check_array(p.x, [1., 2, 3, 4, 5, 6, 7]), True)
        self.assertEqual(check_array(p.y, [0., 1, 2, 3, 4, 5, 6]), True)
        self.assertEqual(check_array(p.z, [0., 0, 0, 0, 0, 0, 0]), True)
        self.assertEqual(p.is_dirty, True)
        # make sure the other arrays were resized
        self.assertEqual(len(p.h), 7)
        self.assertEqual(len(p.m), 7)
        self.assertEqual(len(p.tmp1), 7)
        p.set_dirty(False)
        # try adding an empty particle list
        p.add_particles(**{})
        self.assertEqual(p.get_number_of_particles(), 7)
        self.assertEqual(check_array(p.x, [1., 2, 3, 4, 5, 6, 7]), True)
        self.assertEqual(check_array(p.y, [0., 1, 2, 3, 4, 5, 6]), True)
        self.assertEqual(check_array(p.z, [0., 0, 0, 0, 0, 0, 0]), True)
        self.assertEqual(p.is_dirty, False)
        # make sure the other arrays were resized
        self.assertEqual(len(p.h), 7)
        self.assertEqual(len(p.m), 7)
        self.assertEqual(len(p.tmp1), 7)
        # adding particles with tags
        p = particle_array.ParticleArray(x={'data':x}, y={'data':y},
                                         z={'data':z}, m={'data':m},
                                         h={'data':h})
        p.add_particles(x=[5, 6, 7, 8], tag=[1, 1, 0, 0])
        self.assertEqual(p.get_number_of_particles(), 8)
        self.assertEqual(check_array(p.x, [1, 2, 3, 4, 7, 8]), True)
        self.assertEqual(check_array(p.y, [0, 1, 2, 3, 0, 0]), True)
        self.assertEqual(check_array(p.z, [0, 0, 0, 0, 0, 0]), True)
    def test_remove_tagged_particles(self):
        """
        Tests the remove_tagged_particles function.
        """
        x = [1, 2, 3, 4.]
        y = [0., 1., 2., 3.]
        z = [0., 0., 0., 0.]
        m = [1., 1., 1., 1.]
        h = [.1, .1, .1, .1]
        tag = [1, 1, 1, 0]
        p = particle_array.ParticleArray(x={'data':x}, y={'data':y},
                                         z={'data':z}, m={'data':m},
                                         h={'data':h}, tag={'data':tag})
        p.add_temporary_array('tmp1')
        print p.x, p.tag
        p.remove_tagged_particles(0)
        self.assertEqual(p.get_number_of_particles(), 3)
        self.assertEqual(check_array(p.get('x', only_real_particles=False)
                                     , [1, 2, 3.]), True)
        print p.get('x', only_real_particles=False)
        self.assertEqual(check_array(p.get('y', only_real_particles=False)
                                     , [0., 1, 2]), True)
        self.assertEqual(check_array(p.get('z', only_real_particles=False)
                                     , [0., 0, 0]), True)
        self.assertEqual(check_array(p.get('h', only_real_particles=False)
                                     , [.1, .1, .1]), True)
        self.assertEqual(check_array(p.get('m', only_real_particles=False)
                                     , [1., 1., 1.]), True)
        self.assertEqual(len(p.get('tmp1', only_real_particles=False)), 3)
        # Every surviving particle carries tag 1 (non-real), so the
        # default "real particles only" view is empty.
        self.assertEqual(check_array(p.x, []), True)
        self.assertEqual(check_array(p.y, []), True)
        self.assertEqual(check_array(p.z, []), True)
        self.assertEqual(check_array(p.h, []), True)
        self.assertEqual(check_array(p.m, []), True)
        self.assertEqual(check_array(p.tmp1, []), True)
    def test_add_property(self):
        """
        Tests the add_property function.
        """
        x = [1, 2, 3, 4.]
        y = [0., 1., 2., 3.]
        z = [0., 0., 0., 0.]
        m = [1., 1., 1., 1.]
        h = [.1, .1, .1, .1]
        tag = [0, 0, 0, 0]
        p = particle_array.ParticleArray(x={'data':x}, y={'data':y},
                                         z={'data':z}, m={'data':m},
                                         h={'data':h}, tag={'data':tag})
        p.add_property({'name':'x'})
        # make sure the current 'x' property is intact.
        self.assertEqual(check_array(p.x, x), True)
        # add a property with complete specification
        p.add_property({'name':'f1',
                        'data':[1, 1, 2, 3],
                        'type':'int',
                        'default':4})
        self.assertEqual(check_array(p.f1, [1, 1, 2, 3]), True)
        self.assertEqual(type(p.properties['f1']), IntArray)
        self.assertEqual(p.default_values['f1'], 4)
        # add a property without specifying the type
        p.add_property({'name':'f2',
                        'data':[1, 1, 2, 3],
                        'default':4.0})
        self.assertEqual(type(p.properties['f2']), DoubleArray)
        self.assertEqual(check_array(p.f2, [1, 1, 2, 3]), True)
        p.add_property({'name':'f3'})
        self.assertEqual(type(p.properties['f3']), DoubleArray)
        self.assertEqual(p.properties['f3'].length, p.get_number_of_particles())
        self.assertEqual(check_array(p.f3, [0, 0, 0, 0]), True)
        p.add_property({'name':'f4', 'default':3.0})
        self.assertEqual(type(p.properties['f4']), DoubleArray)
        self.assertEqual(p.properties['f4'].length, p.get_number_of_particles())
        self.assertEqual(check_array(p.f4, [3, 3, 3, 3]), True)
    def test_extend(self):
        """
        Tests the extend function.
        """
        p = particle_array.ParticleArray(default_particle_tag=10, x={},
                                         y={'default':-1.})
        p.extend(5)
        self.assertEqual(p.get_number_of_particles(), 5)
        self.assertEqual(check_array(p.get(
                    'x', only_real_particles=False), [0, 0, 0, 0, 0]), True)
        self.assertEqual(check_array(p.get('y', only_real_particles=False),
                                     [-1., -1., -1., -1., -1.]), True)
        self.assertEqual(check_array(p.get('tag', only_real_particles=False),
                                     [10, 10, 10, 10, 10]), True)
    def test_align_particles(self):
        """
        Tests the align particles function.
        """
        p = particle_array.ParticleArray()
        p.add_property({'name':'x', 'data':[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]})
        p.add_property({'name':'y', 'data':[10, 9, 8, 7, 6, 5, 4, 3, 2, 1]})
        p.set_dirty(False)
        p.set(**{'tag':[0, 0, 1, 1, 1, 0, 4, 0, 1, 5]})
        self.assertEqual(check_array(p.get('x', only_real_particles=False),
                                     [1, 2, 6, 8, 5, 3, 7, 4, 9, 10]),
                         True)
        self.assertEqual(check_array(p.get('y', only_real_particles=False),
                                     [10, 9, 5, 3, 6, 8, 4, 7, 2, 1]), True)
        self.assertEqual(p.is_dirty, True)
        p.set_dirty(False)
        p.set(**{'tag':[0, 0, 0, 0, 1, 1, 1, 1, 1, 1]})
        self.assertEqual(check_array(p.get('x', only_real_particles=False),
                                     [1, 2, 6, 8, 5, 3, 7, 4, 9, 10]),
                         True)
        self.assertEqual(check_array(p.get('y', only_real_particles=False),
                                     [10, 9, 5, 3, 6, 8, 4, 7, 2, 1]), True)
        self.assertEqual(p.is_dirty, False)
    def test_append_parray(self):
        """
        Tests the append_parray function.
        """
        p1 = particle_array.ParticleArray()
        p1.add_property({'name':'x', 'data':[1, 2, 3]})
        p1.align_particles()
        p2 = particle_array.ParticleArray(x={'data':[4, 5, 6]},
                                          y={'data':[1, 2, 3 ]},
                                          tag={'data':[1, 0, 1]})
        p1.append_parray(p2)
        print p1.get('x', only_real_particles=False)
        print p1.get('y', only_real_particles=False)
        print p1.get('tag', only_real_particles=False)
        self.assertEqual(p1.get_number_of_particles(), 6)
        self.assertEqual(check_array(p1.x, [1, 2, 3, 5]), True)
        self.assertEqual(check_array(p1.y, [0, 0, 0, 2]), True)
        self.assertEqual(check_array(p1.tag, [0, 0, 0, 0]), True)
    def test_copy_properties(self):
        """
        Tests the copy properties function.
        """
        p1 = particle_array.ParticleArray()
        p1.add_property({'name':'x', 'data':[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]})
        p1.add_property({'name':'y'})
        p1.add_property({'name':'t'})
        p1.align_particles()
        p2 = particle_array.ParticleArray()
        p2.add_property({'name':'t', 'data':[-1, -1, -1, -1]})
        p2.add_property({'name':'s', 'data':[2, 3, 4, 5]})
        p2.align_particles()
        p1.copy_properties(p2, start_index=5, end_index=9)
        self.assertEqual(check_array(p1.t, [0, 0, 0, 0, 0, -1, -1, -1, -1, 0]),
                         True)
        p1.add_property({'name':'s'})
        p1.copy_properties(p2, start_index=5, end_index=9)
        self.assertEqual(check_array(p1.t, [0, 0, 0, 0, 0, -1, -1, -1, -1, 0]),
                         True)
        self.assertEqual(check_array(p1.s, [0, 0, 0, 0, 0, 2, 3, 4, 5, 0]), True)
    def test_pickle(self):
        """
        Tests the pickle and unpicle functions
        """
        p1 = particle_array.ParticleArray()
        # NOTE(review): this local is never used.
        x = range(10)
        p1.add_property({'name':'x', 'data':[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]})
        p1.add_property({'name':'y'})
        p1.add_property({'name':'t'})
        p1.align_particles()
        s = pickle.dumps(p1)
        p2 = pickle.loads(s)
        self.assertEqual(len(p1.x), len(p2.x))
        check_array(p1.x, p2.x)
if __name__ == '__main__':
    # Stream log records to the console while the tests run.
    import logging
    root_logger = logging.getLogger()
    root_logger.addHandler(logging.StreamHandler())
    unittest.main()
| [
[
8,
0,
0.0034,
0.0051,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.0102,
0.0017,
0,
0.66,
0.1,
88,
0,
1,
0,
0,
88,
0,
0
],
[
1,
0,
0.0119,
0.0017,
0,
0.66,
... | [
"\"\"\"\nTests for the particle array module.\n\"\"\"",
"import unittest",
"import numpy",
"import pysph",
"from pysph.base import particle_array",
"from pysph.base.carray import LongArray, IntArray, DoubleArray",
"from pysph.base import carray",
"import pickle",
"def check_array(x, y):\n \"\"\"C... |
class ParticleType:
    """
    An enum-like holder for the different particle types used in PySPH.

    The class is never instantiated; the integer class attributes are
    accessed directly (e.g. ``ParticleType.Fluid``).

    The types defined are:

    Fluid -- The default particle type used to represent fluids.
    Solid -- Use this to represent solids
    DummyFluid --
    Probe --
    Boundary -- Boundary particles that contribute to forces but
    inherit properties from other particles. Use this to avoid
    particle deficiency near boundaries.
    """
    Fluid = 0
    Solid = 1
    DummyFluid = 2
    Probe = 3
    Boundary = 4

    def __init__(self):
        """
        Disallow instantiation.

        Only the class attributes are meant to be accessed;
        creating an instance raises SystemError.
        """
        # Use the call-style raise: it works on both Python 2 and 3,
        # whereas the old ``raise SystemError, msg`` form is a
        # SyntaxError on Python 3.
        raise SystemError('Do not instantiate the ParticleType class')
| [
[
3,
0,
0.4595,
0.8919,
0,
0.66,
0,
894,
0,
1,
0,
0,
0,
0,
0
],
[
8,
1,
0.2973,
0.5135,
1,
0.33,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
14,
1,
0.5676,
0.027,
1,
0.33,
0... | [
"class ParticleType:\n \"\"\"\n An empty class to provide an enum for the different particle types\n used in PySPH.\n\n The types defined are:\n\n Fluid -- The default particle type used to represent fluids.",
" \"\"\"\n An empty class to provide an enum for the different particle types\n ... |
from cell import CellManager
from nnps import NNPSManager, NeighborLocatorType
from particle_array import ParticleArray
from particle_types import ParticleType
from domain_manager import DomainManagerType as CLDomain
from locator import OpenCLNeighborLocatorType as CLLocator
import locator
import domain_manager
from pysph.solver.cl_utils import HAS_CL
if HAS_CL:
import pyopencl as cl
# Module-level shortcuts for the particle type enum values, so callers
# can write e.g. ``base.Fluid`` instead of ``ParticleType.Fluid``.
Fluid = ParticleType.Fluid
Solid = ParticleType.Solid
Probe = ParticleType.Probe
DummyFluid = ParticleType.DummyFluid
Boundary = ParticleType.Boundary
# Default locator type; used as the ``locator_type`` default in
# ``Particles.__init__`` below.
SPHNeighborLocator = NeighborLocatorType.SPHNeighborLocator
# MPI conditional imports
# Probe for mpi4py at import time; HAS_MPI records whether parallel
# support (and hence ParallelCellManager) is importable.
HAS_MPI = True
try:
    from mpi4py import MPI
except ImportError:
    HAS_MPI = False
else:
    from pysph.parallel.parallel_cell import ParallelCellManager
import numpy
class Particles(object):
""" A collection of particles and related data structures that
hat define an SPH simulation.
In pysph, particle properties are stored in a ParticleArray. The
array may represent a particular type of particle (solid, fluid
etc). Valid types are defined in base.particle_types.
Indexing of the particles is performed by a CellManager and
nearest neighbors are obtained via an instance of NNPSManager.
Particles is a collection of these data structures to provide a
single point access to
(a) Hold all particle information
(b) Update the indexing scheme when particles have moved.
(d) Update remote particle properties in parallel runs.
(e) Barrier synchronizations across processors
Data Attributes:
----------------
arrays -- a list of particle arrays in the simulation.
cell_manager -- the CellManager for spatial indexing.
nnps_manager -- the NNPSManager for neighbor queries.
correction_manager -- a kernel KernelCorrectionManager if kernel
correction is used. Defaults to None
misc_prop_update_functions -- A list of functions to evaluate
properties at the beginning of a sub step.
variable_h -- boolean indicating if variable smoothing lengths are
considered. Defaults to False
in_parallel -- boolean indicating if running in parallel. Defaults to False
load_balancing -- boolean indicating if load balancing is required.
Defaults to False.
pid -- processor id if running in parallel
Example:
---------
In [1]: import pysph.base.api as base
In [2]: x = linspace(-pi,pi,101)
In [3]: pa = base.get_particle_array(x=x)
In [4]: particles = base.Particles(arrays=[pa], in_parallel=True,
load_balancing=False, variable_h=True)
Notes:
------
An appropriate cell manager (CellManager/ParallelCellManager) is
created with reference to the 'in_parallel' attribute.
Similarly an appropriate NNPSManager is created with reference to
the 'variable_h' attribute.
"""
def __init__(self, arrays=[], in_parallel=False, variable_h=False,
load_balancing=True,
locator_type = SPHNeighborLocator,
periodic_domain=None,
min_cell_size=-1,
max_cell_size=0,
max_radius_scale=2,
update_particles=True):
""" Constructor
Parameters:
-----------
arrays -- list of particle arrays in the simulation
in_parallel -- flag for parallel runs
variable_h -- flag for variable smoothing lengths
load_balancing -- flag for dynamic load balancing.
periodic_domain -- the periodic domain for periodicity
"""
# set the flags
self.variable_h = variable_h
self.in_parallel = in_parallel
self.load_balancing = load_balancing
self.locator_type = locator_type
self.min_cell_size = min_cell_size
self.max_cell_size = max_cell_size
self.periodic_domain = periodic_domain
self.parallel_manager = None
self.max_radius_scale = max_radius_scale
# Some sanity checks on the input arrays.
assert len(arrays) > 0, "Particles must be given some arrays!"
prec = arrays[0].cl_precision
msg = "All arrays must have the same cl_precision"
for arr in arrays[1:]:
assert arr.cl_precision == prec, msg
self.arrays = arrays
self.kernel = None
# set defaults
self.correction_manager = None
self.misc_prop_update_functions = []
# initialize the cell manager and nnps manager
self.initialize()
def initialize(self):
""" Perform all initialization tasks here """
# create the cell manager
#if not self.in_parallel:
self.cell_manager = CellManager(arrays_to_bin=self.arrays,
min_cell_size=self.min_cell_size,
max_cell_size=self.max_cell_size,
max_radius_scale=self.max_radius_scale,
periodic_domain=self.periodic_domain)
#else:
# self.cell_manager = ParallelCellManager(
# arrays_to_bin=self.arrays, load_balancing=self.load_balancing)
#self.pid = self.cell_manager.pid
# create the nnps manager
self.nnps_manager = NNPSManager(cell_manager=self.cell_manager,
variable_h=self.variable_h,
locator_type=self.locator_type)
# call an update on the particles (i.e index)
self.update()
def update(self, cache_neighbors=False):
""" Update the status of the Particles.
Parameters:
-----------
cache_neighbors -- flag for caching kernel interactions
Notes:
-------
This function must be called whenever particles have moved and
the indexing structure invalid. After a call to this function,
particle neighbors will be accurately returned.
Since particles move at the end of an integration
step/sub-step, we may perform any other operation that would
be required for the subsequent step/sub-step. Examples of
these are summation density, equation of state, smoothing
length updates, evaluation of velocity divergence/vorticity
etc.
All other properties may be updated by appending functions to
the list 'misc_prop_update_functions'. These functions must
implement an 'eval' method which takes no arguments. An example
is the UpdateDivergence function in 'sph.update_misc_props.py'
"""
pm = self.parallel_manager
if pm is not None:
pm.update()
err = self.nnps_manager.py_update()
assert err != -1, 'NNPSManager update failed! '
# update any other properties (rho, p, cs, div etc.)
self.evaluate_misc_properties()
# evaluate kernel correction terms
if self.correction_manager:
self.correction_manager.update()
def evaluate_misc_properties(self):
""" Evaluate properties from the list of functions. """
for func in self.misc_prop_update_functions:
func.eval()
def add_misc_function(self, func):
""" Add a function to be performed when particles are updated
Parameters:
-----------
func -- The function to perform.
Example:
--------
The conduction coefficient required for the artificial heat
requires the velocity divergence at a particle. This must be
available at the start of every substep of an integration step.
"""
#calcs = operation.get_calcs(self, kernel)
self.misc_prop_update_functions.append(func)
def get_named_particle_array(self, name):
""" Return the named particle array if it exists """
has_array = False
for array in self.arrays:
if array.name == name:
arr = array
has_array = True
if has_array:
return arr
else:
print 'Array %s does not exist!' %(name)
def update_remote_particle_properties(self, props=None):
""" Perform a remote particle property update.
This function needs to be called when the remote particles
on one processor need to be updated on account of computations
on another physical processor.
"""
if self.in_parallel:
self.parallel_manager.update_remote_particle_properties(props=props)
def barrier(self):
""" Synchronize all processes """
if self.in_parallel:
self.parallel_manager.parallel_controller.comm.barrier()
def get_global_min_max(self, props):
""" Find the global minimum and maximum values.
Parameters:
-----------
props : dict
A dict of local properties for which we want global values.
"""
data_min = {}
data_max = {}
for prop in props:
data_min[prop] = props[prop]
data_max[prop] = props[prop]
pc = self.parallel_manager.parallel_controller
glb_min, glb_max = pc.get_glb_min_max(data_min, data_max)
return glb_min, glb_max
@classmethod
def get_neighbor_particle_locator(self, src, dst,
locator_type = SPHNeighborLocator,
variable_h=False, radius_scale=2.0):
""" Return a neighbor locator from the NNPSManager """
cell_manager = CellManager(arrays_to_bin=[src, dst])
nnps_manager = NNPSManager(cell_manager, locator_type=locator_type,
variable_h=variable_h)
return nnps_manager.get_neighbor_particle_locator(
src, dst, radius_scale)
class CLParticles(Particles):
""" A collection of ParticleArrays for use with OpenCL.
CLParticles is modelled very closely on `Particles` which is
intended for Cython computations.
Use CLParticles when using a CLCalc with OpenCL.
Attributes:
-----------
arrays : list
The list of arrays considered in the solution
with_cl : bool {True}
Duh
domain_manager_type : int base.DomainManagerType
A domain manager is used to spatially index the particles and provide
an interface which is accesible and comprehensible to an appropriate
OpenCLNeighborLocator object.
Acceptable values are:
(1) base.DomainManagerType.LinkedListManager : Indexing based on the
linked list structure defined by Hockney and Eastwood.
(2) base.DomainManagerType.DomainManager : No indexing. Intended to
be used for all pair neighbor searches.
cl_locator_type : int base.OpenCLNeighborLocatorType
A neighbor locator is in cahoots with the DomainManager to provide
near neighbors for a particle upon a query.
Acceptable values are:
(1) base.OpenCLNeighborLocatorType.LinkedListSPHNeighborLocator :
A neighbor locator that uses the linked list structure of the
LinkedListManager to provide neighbors in an SPH context. That
is, nearest neighbors are particles in the 27 neighboring cells
for the destination particle.
(2) base.OpenCLNeighborLocatorType.AllPairNeighborLocator :
A trivial locator that essentially returns all source particles
as near neighbors for any query point.
"""
def __init__(self, arrays,
domain_manager_type=CLDomain.DomainManager,
cl_locator_type=CLLocator.AllPairNeighborLocator):
self.arrays = arrays
self.with_cl = True
self.domain_manager_type = domain_manager_type
self.cl_locator_type = cl_locator_type
self.in_parallel = False
def get_cl_precision(self):
"""Return the cl_precision used by the Particle Arrays.
This property cannot be set it is set at construction time for
the Particle arrays. This is simply a convenience function to
query the cl_precision.
"""
# ensure that all arrays have the same precision
narrays = len(self.arrays)
if ( narrays > 1 ):
for i in range(1, narrays):
assert self.arrays[i-1].cl_precision == \
self.arrays[i].cl_precision
return self.arrays[0].cl_precision
def setup_cl(self, context):
""" OpenCL setup given a context.
Parameters:
-----------
context : pyopencl.Context
The context is used to instantiate the domain manager, the
type of which is determined from the attribute
`domain_manager_type`.
I expect this function to be called from the associated
CLCalc, from within it's `setup_cl` method. The point is that
the same context is used for the Calc, the DomainManager and
the underlying ParticleArrays. This is important as a mix of
contexts will result in crashes.
The DomainManager is updated after creation. This means that
the data is ready to be used by the SPHFunction OpenCL
kernels.
"""
self.with_cl = True
self.context = context
# create the domain manager.
self.domain_manager = self.get_domain_manager(context)
# Update the domain manager
self.domain_manager.update()
def get_domain_manager(self, context):
""" Get the domain manager from type. """
if self.domain_manager_type == CLDomain.DomainManager:
return domain_manager.DomainManager(
arrays = self.arrays, context = context
)
if self.domain_manager_type == CLDomain.LinkedListManager:
return domain_manager.LinkedListManager(
arrays=self.arrays, context = context
)
if self.domain_manager_type == CLDomain.RadixSortManager:
return domain_manager.RadixSortManager(
arrays=self.arrays, context = context
)
else:
msg = "Manager type %s not understood!"%(self.domain_manager_type)
raise ValueError(msg)
def get_neighbor_locator(self, source, dest, scale_fac=2.0):
""" Return an OpenCLNeighborLocator between a source and
destination.
Parameters:
-----------
source : ParticleArray
The source particle array
dest : ParticleArray
The destination particle array
scale_fac : float
NOTIMPLEMENTED. The scale facor to determine the effective
cutoff radius.
Note:
-----
An error is raised if a linked list neighbor locator is
requested with a domain manager other than the
LinkedListManager.
"""
if self.cl_locator_type == \
CLLocator.AllPairNeighborLocator:
return locator.AllPairNeighborLocator(source=source, dest=dest)
if self.cl_locator_type == \
CLLocator.LinkedListSPHNeighborLocator:
if not self.domain_manager_type == \
CLDomain.LinkedListManager:
raise RuntimeError
return locator.LinkedListSPHNeighborLocator(
manager=self.domain_manager, source=source, dest=dest,
scale_fac=scale_fac)
if self.cl_locator_type == \
CLLocator.RadixSortNeighborLocator:
if not self.domain_manager_type == \
CLDomain.RadixSortManager:
raise RuntimeError
return locator.RadixSortNeighborLocator(
manager=self.domain_manager, source=source, dest=dest,
scale_fac=scale_fac)
def update(self):
""" Update the spatial index of the particles.
First check if the domain manager needs an update by calling
it's update_status method and then proceed with the update.
The reason this is done is to avoid any repeated updates.
"""
self.domain_manager.update()
def read_from_buffer(self):
""" Read the buffer contents for all the arrays """
for pa in self.arrays:
pa.read_from_buffer()
###############################################################################
| [
[
1,
0,
0.0019,
0.0019,
0,
0.66,
0,
787,
0,
1,
0,
0,
787,
0,
0
],
[
1,
0,
0.0039,
0.0019,
0,
0.66,
0.05,
4,
0,
2,
0,
0,
4,
0,
0
],
[
1,
0,
0.0058,
0.0019,
0,
0.66,
... | [
"from cell import CellManager",
"from nnps import NNPSManager, NeighborLocatorType",
"from particle_array import ParticleArray",
"from particle_types import ParticleType",
"from domain_manager import DomainManagerType as CLDomain",
"from locator import OpenCLNeighborLocatorType as CLLocator",
"import lo... |
"""API module to simplify import of common names from pysph.base package"""
# fast utils
from fast_utils import arange_long
# carray
from carray import LongArray, DoubleArray, IntArray, FloatArray
from cell import Cell, CellManager, PeriodicDomain
from kernels import KernelBase, DummyKernel, CubicSplineKernel, \
HarmonicKernel, GaussianKernel, M6SplineKernel, W8Kernel, W10Kernel,\
QuinticSplineKernel, WendlandQuinticSplineKernel, Poly6Kernel
from nnps import NbrParticleLocatorBase, FixedDestNbrParticleLocator, \
VarHNbrParticleLocator, NNPSManager, brute_force_nnps
from nnps import NeighborLocatorType
from particle_array import ParticleArray, get_particle_array
from particles import Particles, CLParticles
from point import Point, IntPoint
# ParticleTypes
from particle_types import ParticleType
Fluid = ParticleType.Fluid
Solid = ParticleType.Solid
Boundary = ParticleType.Boundary
Probe = ParticleType.Probe
DummyFluid = ParticleType.DummyFluid
from geometry import MeshPoint, Line, Geometry
# LinkedListManager
from domain_manager import LinkedListManager, DomainManager, RadixSortManager, \
DomainManagerType
# OpenCL locator
from locator import OpenCLNeighborLocator, LinkedListSPHNeighborLocator, \
AllPairNeighborLocator, RadixSortNeighborLocator
from locator import OpenCLNeighborLocatorType
# radix sort
from radix_sort import AMDRadixSort
import nnps_util
| [
[
8,
0,
0.0208,
0.0208,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.0833,
0.0208,
0,
0.66,
0.0476,
525,
0,
1,
0,
0,
525,
0,
0
],
[
1,
0,
0.1458,
0.0208,
0,
0.66... | [
"\"\"\"API module to simplify import of common names from pysph.base package\"\"\"",
"from fast_utils import arange_long",
"from carray import LongArray, DoubleArray, IntArray, FloatArray",
"from cell import Cell, CellManager, PeriodicDomain",
"from kernels import KernelBase, DummyKernel, CubicSplineKernel,... |
''' Implement infrastructure for the solver to add various interfaces '''
from functools import wraps, partial
import threading, thread
from pysph.base.particle_array import ParticleArray
import logging
logger = logging.getLogger()
class DummyComm(object):
''' A dummy MPI.Comm implementation as placeholder for for serial runs '''
def Get_size(self):
''' return the size of the comm (1) '''
return 1
def Get_rank(self):
''' return the rank of the process (0) '''
return 0
def send(self, data, pid):
''' dummy send implementation '''
self.data = data
def recv(self, pid):
''' dummy recv implementation '''
data = self.data
del self.data
return data
def bcast(self, data):
''' bcast (broadcast) implementation for serial run '''
return data
def gather(self, data):
''' gather implementation for serial run '''
return [data]
def synchronized(lock_or_func):
''' decorator for synchronized (thread safe) function
Usage:
- sync_func = synchronized(lock)(func) # sync with an existing lock
- sync_func = synchronized(func) # sync with a new private lock
'''
if isinstance(lock_or_func, thread.LockType):
lock = lock_or_func
def synchronized_inner(func):
@wraps(func)
def wrapped(*args, **kwargs):
with lock:
return func(*args, **kwargs)
return wrapped
return synchronized_inner
else:
func = lock_or_func
lock = threading.Lock()
return synchronized(lock)(func)
def wrap_dispatcher(obj, meth, *args2, **kwargs2):
@wraps(meth)
def wrapped(*args, **kwargs):
kw = {}
kw.update(kwargs2)
kw.update(kwargs)
return meth(obj.block, *(args2+args), **kw)
return wrapped
class Controller(object):
''' Controller class acts a a proxy to control the solver
This is passed as an argument to the interface
**Methods available**:
- get -- get the value of a solver parameter
- set -- set the value of a solver parameter
- get_result -- return result of a queued command
- pause_on_next -- pause solver thread on next iteration
- wait -- wait (block) calling thread till solver is paused
(call after `pause_on_next`)
- cont -- continue solver thread (call after `pause_on_next`)
Various other methods are also available as listed in
:data:`CommandManager.dispatch_dict` which perform different functions.
- The methods in CommandManager.active_methods do their operation and return
the result (if any) immediately
- The methods in CommandManager.lazy_methods do their later when solver
thread is available and return a task-id. The result of the task can be
obtained later using the blocking call `get_result()` which waits till
result is available and returns the result.
The availability of the result can be checked using the lock returned
by `get_task_lock()` method
FIXME: wait/cont currently do not work in parallel
'''
def __init__(self, command_manager, block=True):
super(Controller, self).__init__()
self.__command_manager = command_manager
self.daemon = True
self.block = block
self._set_methods()
def _set_methods(self):
for prop in self.__command_manager.solver_props:
setattr(self, 'get_'+prop, wrap_dispatcher(self, self.__command_manager.dispatch, 'get', prop))
setattr(self, 'set_'+prop, wrap_dispatcher(self, self.__command_manager.dispatch, 'set', prop))
for meth in self.__command_manager.solver_methods:
setattr(self, meth, wrap_dispatcher(self, self.__command_manager.dispatch, meth))
for meth in self.__command_manager.lazy_methods:
setattr(self, meth, wrap_dispatcher(self, self.__command_manager.dispatch, meth))
for meth in self.__command_manager.active_methods:
setattr(self, meth, wrap_dispatcher(self, self.__command_manager.dispatch, meth))
def get(self, name):
''' get a solver property; returns immediately '''
return self.__command_manager.dispatch(self.block, 'get', name)
def set(self, name, value):
''' set a solver property; returns immediately; '''
return self.__command_manager.dispatch(self.block, 'set', name, value)
def pause_on_next(self):
''' pause the solver thread on next iteration '''
return self.__command_manager.pause_on_next()
def wait(self):
''' block the calling thread until the solver thread pauses
call this only after calling the `pause_on_next` method to tell
the controller to pause the solver thread'''
self.__command_manager.wait()
return True
def get_prop_names(self):
return list(self.__command_manager.solver_props)
def cont(self):
''' continue solver thread after it has been paused by `pause_on_next`
call this only after calling the `pause_on_next` method '''
return self.__command_manager.cont()
def get_result(self, task_id):
''' get the result of a previously queued command '''
return self.__command_manager.get_result(task_id)
def set_blocking(self, block):
''' set the blocking mode to True/False
In blocking mode (block=True) all methods other than getting of
solver properties block until the command is executed by the solver
and return the results. The blocking time can vary depending on the
time taken by solver per iteration and the command_interval
In non-blocking mode, these methods queue the command for later
and return a string corresponding to the task_id of the operation.
The result can be later obtained by a (blocking) call to get_result
with the task_id as argument
'''
if block != self.block:
self.block = block
self._set_methods()
return self.block
def get_blocking(self):
''' get the blocking mode ( True/False ) '''
return self.block
def ping(self):
return True
def on_root_proc(f):
''' run the decorated function only on the root proc '''
@wraps(f)
def wrapper(self, *args, **kwds):
if self.comm.Get_rank()==0:
return f(self, *args, **kwds)
return wrapper
def in_parallel(f):
''' return a list of results of running decorated function on all procs '''
@wraps(f)
def wrapper(self, *args, **kwds):
return self.comm.gather(f(self, *args, **kwds))
return wrapper
class CommandManager(object):
''' Class to manage and synchronize commands from various Controllers '''
solver_props = set(('t', 'tf', 'dt', 'count', 'pfreq', 'fname',
'detailed_output', 'output_directory', 'command_interval'))
solver_methods = set(('dump_output',))
lazy_methods = set(('get_particle_array_names', 'get_named_particle_array',
'get_particle_array_combined', 'get_particle_array_from_procs'))
active_methods = set(('get_status', 'get_task_lock', 'set_log_level'))
def __init__(self, solver, comm=None):
if comm is not None:
self.comm = comm
else:
try:
self.comm = solver.particles.cell_manager.parallel_controller.comm
except AttributeError:
self.comm = DummyComm()
logger.info('CommandManager: using comm: %s'%self.comm)
self.solver = solver
self.interfaces = []
self.func_dict = {}
self.rlock = threading.RLock()
self.res_lock = threading.Lock()
self.plock = threading.Condition()
self.qlock = threading.Condition() # queue lock
self.queue = []
self.queue_dict = {}
self.queue_lock_map = {}
self.results = {}
self.pause = set([])
@on_root_proc
def add_interface(self, callable, block=True):
''' Add a callable interface to the controller
The callable must accept an Controller instance argument.
The callable is called in a new thread of its own and it can
do various actions with methods defined on the Controller
instance passed to it
The new created thread is set to daemon mode and returned
'''
logger.info('adding_interface: %s'%callable)
control = Controller(self, block)
thr = threading.Thread(target=callable, args=(control,))
thr.daemon = True
thr.start()
return thr
def add_function(self, callable, interval=1):
''' add a function to to be called every `interval` iterations '''
l = self.func_dict[interval] = self.func_dict.get(interval, [])
l.append(callable)
def execute_commands(self, solver):
''' called by the solver after each timestep '''
# TODO: first synchronize all the controllers in different processes
# using mpi
self.sync_commands()
with self.qlock:
self.run_queued_commands()
logger.info('control handler: count=%d'%solver.count)
for interval in self.func_dict:
if solver.count%interval == 0:
for func in self.func_dict[interval]:
func(solver)
self.wait_for_cmd()
def wait_for_cmd(self):
''' wait for command from any interface '''
with self.qlock:
while self.pause:
with self.plock:
self.plock.notify_all()
self.qlock.wait()
self.run_queued_commands()
def sync_commands(self):
''' send the pending commands to all the procs in parallel run '''
self.queue_dict, self.queue, self.pause = self.comm.bcast((self.queue_dict, self.queue, self.pause))
def run_queued_commands(self):
while self.queue:
lock_id = self.queue.pop(0)
meth, args, kwargs = self.queue_dict[lock_id]
with self.res_lock:
try:
self.results[lock_id] = self.run_command(meth, args, kwargs)
finally:
del self.queue_dict[lock_id]
if self.comm.Get_rank()==0:
self.queue_lock_map[lock_id].release()
def run_command(self, cmd, args=[], kwargs={}):
res = self.dispatch_dict[cmd](self, *args, **kwargs)
logger.info('controller: running_command: %s %s %s %s'%(
cmd, args, kwargs, res))
return res
def pause_on_next(self):
''' pause and wait for command on the next control interval '''
if self.comm.Get_size() > 1:
logger.info('pause/continue noy yet supported in parallel runs')
return False
with self.plock:
self.pause.add(threading.current_thread().ident)
self.plock.notify()
return True
def wait(self):
with self.plock:
self.plock.wait()
def cont(self):
''' continue after a pause command '''
if self.comm.Get_size() > 1:
logger.info('pause/continue noy yet supported in parallel runs')
return
with self.plock:
self.pause.remove(threading.current_thread().ident)
self.plock.notify()
with self.qlock:
self.qlock.notify_all()
def get_result(self, lock_id):
''' get the result of a previously queued command '''
lock_id = int(lock_id)
lock = self.queue_lock_map[lock_id]
with lock:
with self.res_lock:
ret = self.results[lock_id]
del self.results[lock_id]
del self.queue_lock_map[lock_id]
return ret
def get_task_lock(self, lock_id):
''' get the Lock instance associated with a command '''
return self.queue_lock_map[int(lock_id)]
def get_prop(self, name):
''' get a solver property '''
return getattr(self.solver, name)
def set_prop(self, name, value):
''' set a solver property '''
return setattr(self.solver, name, value)
def solver_method(self, name, *args, **kwargs):
''' execute a method on the solver '''
ret = getattr(self.solver, name)(*args, **kwargs)
ret = self.comm.gather(ret)
return ret
def get_particle_array_names(self):
''' get the names of the particle arrays '''
return [pa.name for pa in self.solver.particles.arrays]
def get_named_particle_array(self, name, props=None):
for pa in self.solver.particles.arrays:
if pa.name == name:
if props:
return [getattr(pa, p) for p in props if hasattr(pa, p)]
else:
return pa
def get_particle_array_index(self, name):
''' get the index of the named particle array '''
for i,pa in enumerate(self.solver.particles.arrays):
if pa.name == name:
return i
def get_particle_array_from_procs(self, idx, procs=None):
''' get particle array at index from all processes
specifying processes is currently not implemented
'''
if procs is None:
procs = range(self.comm.size)
pa = self.solver.particles.arrays[idx]
pas = self.comm.gather(pa)
return pas
def get_particle_array_combined(self, idx, procs=None):
''' get a single particle array with combined data from all procs
specifying processes is currently not implemented
'''
if procs is None:
procs = range(self.comm.size)
pa = self.solver.particles.arrays[idx]
pas = self.comm.gather(pa)
pa = ParticleArray(name=pa.name)
for p in pas:
pa.append_parray(p)
return pa
def get_status(self):
''' get the status of the controller '''
return 'commands queued: %d'%len(self.queue)
def set_log_level(self, level):
''' set the logging level '''
logger.setLevel(level)
dispatch_dict = {'get':get_prop, 'set':set_prop}
for meth in solver_methods:
dispatch_dict[meth] = solver_method
for meth in lazy_methods:
dispatch_dict[meth] = locals()[meth]
for meth in active_methods:
dispatch_dict[meth] = locals()[meth]
@synchronized
def dispatch(self, block, meth, *args, **kwargs):
''' execute/queue a command with specified arguments '''
if meth in self.dispatch_dict:
if meth=='get' or meth=='set':
prop = args[0]
if prop not in self.solver_props:
raise RuntimeError('Invalid dispatch on method: %s with '
'non-existant property: %s '%(meth,prop))
if block or meth=='get' or meth in self.active_methods:
logger.info('controller: immediate dispatch(): %s %s %s'%(
meth, args, kwargs))
return self.dispatch_dict[meth](self, *args, **kwargs)
else:
lock = threading.Lock()
lock.acquire()
lock_id = id(lock)
with self.qlock:
self.queue_lock_map[lock_id] = lock
self.queue_dict[lock_id] = (meth, args, kwargs)
self.queue.append(lock_id)
logger.info('controller: dispatch(%d): %s %s %s'%(
lock_id, meth, args, kwargs))
return str(lock_id)
else:
raise RuntimeError('Invalid dispatch on method: '+meth)
| [
[
8,
0,
0.0023,
0.0023,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.0068,
0.0023,
0,
0.66,
0.0833,
711,
0,
2,
0,
0,
711,
0,
0
],
[
1,
0,
0.0091,
0.0023,
0,
0.66... | [
"''' Implement infrastructure for the solver to add various interfaces '''",
"from functools import wraps, partial",
"import threading, thread",
"from pysph.base.particle_array import ParticleArray",
"import logging",
"logger = logging.getLogger()",
"class DummyComm(object):\n ''' A dummy MPI.Comm im... |
"""
Module contains some common functions.
"""
# standard imports
import pickle
import numpy
import sys
import os
import platform
import commands
from numpy.lib import format
HAS_PBAR = True
try:
import progressbar
except ImportError:
HAS_PBAR = False
import pysph
def check_array(x, y):
"""Check if two arrays are equal with an absolute tolerance of
1e-16."""
return numpy.allclose(x, y, atol=1e-16, rtol=0)
def zipfile_factory(*args, **kwargs):
import zipfile
if sys.version_info >= (2, 5):
kwargs['allowZip64'] = True
return zipfile.ZipFile(*args, **kwargs)
def savez(file, *args, **kwds):
"""
Save several arrays into a single file in uncompressed ``.npz`` format.
If arguments are passed in with no keywords, the corresponding variable
names, in the .npz file, are 'arr_0', 'arr_1', etc. If keyword arguments
are given, the corresponding variable names, in the ``.npz`` file will
match the keyword names.
Parameters
----------
file : str or file
Either the file name (string) or an open file (file-like object)
where the data will be saved. If file is a string, the ``.npz``
extension will be appended to the file name if it is not already there.
*args : Arguments, optional
Arrays to save to the file. Since it is not possible for Python to
know the names of the arrays outside `savez`, the arrays will be saved
with names "arr_0", "arr_1", and so on. These arguments can be any
expression.
**kwds : Keyword arguments, optional
Arrays to save to the file. Arrays will be saved in the file with the
keyword names.
Returns
-------
None
See Also
--------
save : Save a single array to a binary file in NumPy format.
savetxt : Save an array to a file as plain text.
Notes
-----
The ``.npz`` file format is a zipped archive of files named after the
variables they contain. The archive is not compressed and each file
in the archive contains one variable in ``.npy`` format. For a
description of the ``.npy`` format, see `format`.
When opening the saved ``.npz`` file with `load` a `NpzFile` object is
returned. This is a dictionary-like object which can be queried for
its list of arrays (with the ``.files`` attribute), and for the arrays
themselves.
Examples
--------
>>> from tempfile import TemporaryFile
>>> outfile = TemporaryFile()
>>> x = np.arange(10)
>>> y = np.sin(x)
Using `savez` with *args, the arrays are saved with default names.
>>> np.savez(outfile, x, y)
>>> outfile.seek(0) # Only needed here to simulate closing & reopening file
>>> npzfile = np.load(outfile)
>>> npzfile.files
['arr_1', 'arr_0']
>>> npzfile['arr_0']
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
Using `savez` with **kwds, the arrays are saved with the keyword names.
>>> outfile = TemporaryFile()
>>> np.savez(outfile, x=x, y=y)
>>> outfile.seek(0)
>>> npzfile = np.load(outfile)
>>> npzfile.files
['y', 'x']
>>> npzfile['x']
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
See Also
--------
numpy.savez_compressed : Save several arrays into a compressed .npz file
format
"""
_savez(file, args, kwds, False)
def savez_compressed(file, *args, **kwds):
"""
Save several arrays into a single file in compressed ``.npz`` format.
If keyword arguments are given, then filenames are taken from the keywords.
If arguments are passed in with no keywords, then stored file names are
arr_0, arr_1, etc.
Parameters
----------
file : string
File name of .npz file.
args : Arguments
Function arguments.
kwds : Keyword arguments
Keywords.
See Also
--------
numpy.savez : Save several arrays into an uncompressed .npz file format
"""
_savez(file, args, kwds, True)
def _savez(file, args, kwds, compress):
# Import is postponed to here since zipfile depends on gzip, an optional
# component of the so-called standard library.
import zipfile
# Import deferred for startup time improvement
import tempfile
if isinstance(file, basestring):
if not file.endswith('.npz'):
file = file + '.npz'
namedict = kwds
for i, val in enumerate(args):
key = 'arr_%d' % i
if key in namedict.keys():
msg = "Cannot use un-named variables and keyword %s" % key
raise ValueError, msg
namedict[key] = val
if compress:
compression = zipfile.ZIP_DEFLATED
else:
compression = zipfile.ZIP_STORED
zip = zipfile_factory(file, mode="w", compression=compression)
# Stage arrays in a temporary file on disk, before writing to zip.
fd, tmpfile = tempfile.mkstemp(suffix='-numpy.npy')
os.close(fd)
try:
for key, val in namedict.iteritems():
fname = key + '.npy'
fid = open(tmpfile, 'wb')
try:
format.write_array(fid, numpy.asanyarray(val))
fid.close()
fid = None
zip.write(tmpfile, arcname=fname)
finally:
if fid:
fid.close()
finally:
os.remove(tmpfile)
zip.close()
#############################################################################
def get_distributed_particles(pa, comm, cell_size):
# FIXME: this can be removed once the examples all use Application.
from pysph.parallel.load_balancer import LoadBalancer
rank = comm.Get_rank()
num_procs = comm.Get_size()
if rank == 0:
lb = LoadBalancer.distribute_particles(pa, num_procs=num_procs,
block_size=cell_size)
else:
lb = None
particles = comm.scatter(lb, root=0)
return particles
################################################################################
# `PBar` class.
###############################################################################
class PBar(object):
"""A simple wrapper around the progressbar so it works if a user has
it installed or not.
"""
def __init__(self, maxval, show=True):
bar = None
self.count = 0
self.maxval = maxval
self.show = show
if HAS_PBAR and show:
widgets = [progressbar.Percentage(), ' ', progressbar.Bar(),
progressbar.ETA()]
bar = progressbar.ProgressBar(widgets=widgets,
maxval=maxval).start()
self.bar = bar
def update(self):
self.count += 1
if self.bar is not None:
self.bar.update(self.count)
elif self.show:
sys.stderr.write('\r%d%%'%int(self.count*100/self.maxval))
sys.stderr.flush()
def finish(self):
if self.bar is not None:
self.bar.finish()
elif self.show:
sys.stderr.write('\r100%\n')
sys.stderr.flush()
##############################################################################
# friendly mkdir from http://code.activestate.com/recipes/82465/.
##############################################################################
def mkdir(newdir):
"""works the way a good mkdir should :)
- already exists, silently complete
- regular file in the way, raise an exception
- parent directory(ies) does not exist, make them as well
"""
if os.path.isdir(newdir):
pass
elif os.path.isfile(newdir):
raise OSError("a file with the same name as the desired " \
"dir, '%s', already exists." % newdir)
else:
head, tail = os.path.split(newdir)
if head and not os.path.isdir(head):
mkdir(head)
if tail:
try:
os.mkdir(newdir)
# To prevent race in mpi runs
except OSError as e:
import errno
if e.errno == errno.EEXIST and os.path.isdir(newdir):
pass
else:
raise
##############################################################################
# read pickled data from a file
##############################################################################
def get_pickled_data(fname):
f = open(fname, 'r')
data = pickle.load(f)
f.close()
return data
def get_pysph_root():
return os.path.split(pysph.__file__)[0]
##############################################################################
# Load an output file
##############################################################################
def load(fname):
    """ Load and return data from an output (.npz) file dumped by PySPH.

    Parameters
    ----------
    fname : str
        Name of the ``.npz`` file to read.

    For output file version 1, the function returns a dictionary with
    the keys:

    solver_data : Solver constants at the time of output like time,
    time step and iteration count.

    arrays : ParticleArrays keyed on names with the ParticleArray
    object as value.

    Raises
    ------
    RuntimeError
        If the file records no version number, or an unknown version.
    """
    from pysph.base.particle_array import get_particle_array
    data = numpy.load(fname)
    ret = {"arrays":{}}
    if 'version' not in data.files:
        # BUGFIX: corrected typo in the error message ("nnumber").
        msg = "Wrong file type! No version number recorded."
        raise RuntimeError(msg)
    version = data['version']
    if version == 1:
        # the dicts were saved as 0-d object arrays; reshape to (1,)
        # and index to recover the original Python objects.
        arrays = data["arrays"]
        arrays.shape = (1,)
        arrays = arrays[0]
        solver_data = data["solver_data"]
        solver_data.shape = (1,)
        solver_data = solver_data[0]
        for array_name in arrays:
            array = get_particle_array(name=array_name,
                                       cl_precision="single",
                                       **arrays[array_name])
            ret["arrays"][array_name] = array
        ret["solver_data"] = solver_data
    else:
        raise RuntimeError("Version not understood!")
    return ret
def load_and_concatenate(prefix,nprocs=1,directory=".",count=None):
    """Load the results from multiple files.
    Given a filename prefix and the number of processors, return a
    concatenated version of the dictionary returned via load.
    Parameters:
    -----------
    prefix : str
        A filename prefix for the output file.
    nprocs : int
        The number of processors (files) to read
    directory : str
        The directory for the files
    count : int
        The file iteration count to read. If None, the last available
        one is read
    """
    if count is None:
        # extract the iteration count from filenames shaped like
        # <prefix>_<rank>_<count>.npz and pick the latest one
        counts = [i.rsplit('_',1)[1][:-4] for i in os.listdir(directory) if i.startswith(prefix) and i.endswith('.npz')]
        counts = sorted( [int(i) for i in counts] )
        count = counts[-1]
    arrays_by_rank = {}
    for rank in range(nprocs):
        fname = os.path.join(directory, prefix+'_'+str(rank)+'_'+str(count)+'.npz')
        data = load(fname)
        arrays_by_rank[rank] = data["arrays"]
    arrays = _concatenate_arrays(arrays_by_rank, nprocs)
    # NOTE(review): `data` is the dict loaded for the *last* rank, so its
    # solver_data is reused as-is.  With nprocs < 1 the loop never runs
    # and `data` is unbound -- callers must pass nprocs >= 1.
    data["arrays"] = arrays
    return data
def _concatenate_arrays(arrays_by_rank, nprocs):
"""Concatenate arrays into one single particle array. """
if nprocs <= 0:
return 0
array_names = arrays_by_rank[0].keys()
first_processors_arrays = arrays_by_rank[0]
if nprocs > 1:
ret = {}
for array_name in array_names:
first_array = first_processors_arrays[array_name]
for rank in range(1,nprocs):
other_processors_arrays = arrays_by_rank[rank]
other_array = other_processors_arrays[array_name]
# append the other array to the first array
first_array.append_parray(other_array)
# remove the non local particles
first_array.remove_tagged_particles(1)
ret[array_name] = first_array
else:
ret = arrays_by_rank[0]
return ret
| [
[
8,
0,
0.0049,
0.0073,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.0146,
0.0024,
0,
0.66,
0.0435,
848,
0,
1,
0,
0,
848,
0,
0
],
[
1,
0,
0.017,
0.0024,
0,
0.66,... | [
"\"\"\"\nModule contains some common functions.\n\"\"\"",
"import pickle",
"import numpy",
"import sys",
"import os",
"import platform",
"import commands",
"from numpy.lib import format",
"HAS_PBAR = True",
"try:\n import progressbar\nexcept ImportError:\n HAS_PBAR = False",
" import pr... |
""" An example solver for the circular patch of fluid """
import numpy
from optparse import OptionGroup, Option
import pysph.base.api as base
import pysph.sph.api as sph
from pysph.sph.funcs import stress_funcs
from pysph.sph.funcs import eos_funcs
from pysph.sph.funcs import viscosity_funcs
from solver import Solver
from post_step_functions import CFLTimeStepFunction
from sph_equation import SPHOperation, SPHIntegration
from pysph.sph.funcs.arithmetic_funcs import PropertyGet, PropertyAdd
from pysph.sph.funcs.basic_funcs import KernelSum
Fluids = base.ParticleType.Fluid
Solids = base.ParticleType.Solid
def get_particle_array(xsph=True, mart_stress=True, **kwargs):
    """Create a solid ParticleArray carrying the stress-solver properties.
    Adds the six independent components of the symmetric deviatoric
    stress tensor, optional XSPH velocity-correction properties and
    optional Monaghan artificial-stress components.
    """
    kwargs.setdefault('type', 1)
    kwargs.setdefault('name', 'solid')
    pa = base.get_particle_array(**kwargs)
    # (j, i) index pairs with j <= i: the independent components of a
    # symmetric 3x3 tensor
    components = [(j, i) for i in range(3) for j in range(i+1)]
    for j, i in components:
        pa.add_property(dict(name='sigma%d%d'%(j,i)))
    if xsph:
        for prop_name in ('ubar', 'vbar', 'wbar'):
            pa.add_property(dict(name=prop_name))
    if mart_stress:
        for j, i in components:
            pa.add_property(dict(name='MArtStress%d%d'%(j,i)))
    return pa
def get_circular_patch(name="", type=1, dx=0.25):
x,y = numpy.mgrid[-1.05:1.05+1e-4:dx, -1.05:1.05+1e-4:dx]
x = x.ravel()
y = y.ravel()
m = numpy.ones_like(x)*dx*dx
h = numpy.ones_like(x)*2*dx
rho = numpy.ones_like(x)
z = 1-rho
p = 0.5*1.0*100*100*(1 - (x**2 + y**2))
cs = numpy.ones_like(x) * 100.0
u = -100*x
v = 100*y
indices = []
for i in range(len(x)):
if numpy.sqrt(x[i]*x[i] + y[i]*y[i]) - 1 > 1e-10:
indices.append(i)
pa = base.get_particle_array(x=x, y=y, m=m, rho=rho, h=h, p=p, u=u, v=v,
cs=cs,name=name, type=type,
sigma00=z, sigma11=z, sigma22=z,
sigma01=z, sigma12=z, sigma02=z)
pa.constants['E'] = 1e9
pa.constants['nu'] = 0.3
pa.constants['G'] = pa.constants['E']/(2.0*1+pa.constants['nu'])
pa.constants['K'] = stress_funcs.get_K(pa.constants['G'], pa.constants['nu'])
pa.constants['rho0'] = 1.0
pa.constants['c_s'] = numpy.sqrt(pa.constants['K']/pa.constants['rho0'])
la = base.LongArray(len(indices))
la.set_data(numpy.array(indices))
pa.remove_particles(la)
pa.set(idx=numpy.arange(len(pa.x)))
print 'Number of particles: ', len(pa.x)
return pa
class StressSolver(Solver):
    """Solver for elastic solids: bulk-modulus EOS, deviatoric stress
    rates, Monaghan artificial stress/viscosity and optional XSPH."""
    def __init__(self, dim, integrator_type, xsph=0.5, marts_eps=0.3, marts_n=4,
                 CFL=None, martv_alpha=1.0, martv_beta=1.0,
                 co=None, ro=None):
        ''' constructor
        Parameters
        ----------
        xsph : float
            correction factor for xsph (0=disabled, default=0.5)
        marts_eps : float
            correction factor epsilon for Monaghan's artificial stress term
            (0=disabled, default=0.3)
        marts_n : float
            correction factor kernel exponent for Monaghan's
            artificial stress term
        CFL : float or None
            the CFL number if time-step is to be based on CFL (use < 0.3)
        dim, integrator_type : see :py:meth:`Solver.__init__`
        '''
        # construction-time values; command-line options (get_options)
        # and setup_solver default to these.
        self.defaults = dict(xsph=xsph,
                             marts_eps=marts_eps,
                             marts_n=marts_n,
                             martv_alpha=martv_alpha,
                             martv_beta=martv_beta,
                             cfl=CFL,
                             co=co,
                             ro=ro
                             )
        Solver.__init__(self, dim, integrator_type)
    def initialize(self):
        """Extend base initialization: add the stress tensor and
        artificial-stress components to the printed properties."""
        Solver.initialize(self)
        self.print_properties.append('sigma00')
        self.print_properties.extend(['sigma01', 'sigma11'])
        self.print_properties.extend(['sigma02', 'sigma12', 'sigma22'])
        self.print_properties.append('MArtStress00')
        self.print_properties.extend(['MArtStress01', 'MArtStress11'])
        self.print_properties.extend(['MArtStress02', 'MArtStress12', 'MArtStress22'])
    def get_options(self, opt_parser):
        """Return (OptionGroup, cfl Option) describing the solver's
        command-line options; defaults come from self.defaults."""
        opt = OptionGroup(opt_parser, "Stress Solver Options")
        opt.add_option('--xsph', action='store', type='float',
                       dest='xsph', default=self.defaults['xsph'],
                       help='set the XSPH correction weight factor (default=0.5)')
        opt.add_option('--marts_eps', dest='marts_eps', type='float',
                       default=self.defaults['marts_eps'],
                       help='set the Monaghan artificial stress weight factor (0.3)')
        opt.add_option('--marts_n', dest='marts_n', type='float',
                       default=self.defaults['marts_n'],
                       help='set the Monaghan artificial stress exponent (4)')
        opt.add_option('--martv_alpha', dest='martv_alpha', type='float',
                       default=self.defaults['martv_alpha'],
                       help='set the Monaghan artificial viscosity alpha (1)')
        opt.add_option('--martv_beta', dest='martv_beta', type='float',
                       default=self.defaults['martv_beta'],
                       help='set the Monaghan artificial viscosity beta (1)')
        opt.add_option('--co', dest="co", type="float",
                       default=self.defaults["co"],
                       help="Set the reference sound speed c0 ")
        opt.add_option("--ro", dest="ro", type="float",
                       default=self.defaults["ro"],
                       help="Set the reference density r0")
        cfl_opt = Option('--cfl', dest='cfl', type='float',
                         default=self.defaults['cfl'],
                         help='set the cfl number for determining the timestep '
                              'of simulation')
        return opt, cfl_opt
    def setup_solver(self, options=None):
        """Register the SPH operations (EOS, artificial stress, density
        rate, momentum, XSPH, stress rates, position stepping) in the
        order they must be evaluated."""
        options = options or self.defaults
        xsph = options.get('xsph')
        marts_eps = options.get('marts_eps')
        marts_n = options.get('marts_n')
        martv_alpha = options.get('martv_alpha')
        martv_beta = options.get('martv_beta')
        cfl = options.get('cfl')
        co = options.get("co")
        ro = options.get("ro")
        # Add the operations
        # Equation of state
        self.add_operation(SPHOperation(
            stress_funcs.BulkModulusPEqn,
            on_types=[Solids],
            updates=['p'],
            id='eos')
        )
        # Monaghan Artificial Stress
        if marts_eps:
            self.add_operation(SPHOperation(
                stress_funcs.MonaghanArtStressD.withargs(eps=marts_eps),
                on_types=[Solids],
                updates=['MArtStress00','MArtStress11','MArtStress22'],
                id='mart_stress_d')
            )
            self.add_operation(SPHOperation(
                stress_funcs.MonaghanArtStressS.withargs(eps=marts_eps),
                on_types=[Solids],
                updates=['MArtStress12','MArtStress02','MArtStress01'],
                id='mart_stress_s')
            )
            self.add_operation(SPHIntegration(
                stress_funcs.MonaghanArtStressAcc.withargs(n=marts_n),
                from_types=[Fluids, Solids], on_types=[Solids],
                updates=['u','v','w'],
                id='mart_stressacc')
            )
        # Density Rate
        self.add_operation(SPHIntegration(
            sph.SPHDensityRate.withargs(hks=False),
            from_types=[Solids], on_types=[Solids],
            updates=['rho'],
            id='density')
        )
        # Momentum Equation. Deviatoric stress component
        self.add_operation(SPHIntegration(
            stress_funcs.SimpleStressAcceleration,
            from_types=[Fluids, Solids], on_types=[Solids],
            updates=['u','v','w'],
            id='stressacc')
        )
        # Momentum equation. Symmetric component.
        self.add_operation(SPHIntegration(
            stress_funcs.PressureAcceleration.withargs(alpha=martv_alpha,
                                                       beta=martv_beta,
                                                       eta=0.0),
            from_types=[Fluids, Solids], on_types=[Solids],
            updates=['u','v','w'],
            id='pacc')
        )
        # XSPH correction
        if xsph:
            self.add_operation(SPHIntegration(
                sph.XSPHCorrection.withargs(eps=xsph, hks=False),
                from_types=[Solids], on_types=[Solids],
                updates=['u','v','w'],
                id='xsph')
            )
        # Deviatoric stress rate
        self.add_operation(SPHIntegration(
            stress_funcs.StressRateD.withargs(xsph=bool(xsph)),
            from_types=[Fluids, Solids], on_types=[Solids],
            updates=['sigma00','sigma11','sigma22'],
            id='stressD')
        )
        # Deviatoric stress rate
        self.add_operation(SPHIntegration(
            stress_funcs.StressRateS.withargs(xsph=bool(xsph)),
            from_types=[Fluids, Solids], on_types=[Solids],
            updates=['sigma12','sigma02','sigma01'],
            id='stressS')
        )
        # Position Stepping
        self.add_operation(SPHIntegration(
            sph.PositionStepping,
            on_types=[Solids],
            updates=['x','y','z'],
            id='pos')
        )
        # Time step function
        if cfl:
            self.pre_step_functions.append(CFLTimeStepFunction(cfl))
#############################################################################
| [
[
8,
0,
0.0033,
0.0033,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.0098,
0.0033,
0,
0.66,
0.0588,
954,
0,
1,
0,
0,
954,
0,
0
],
[
1,
0,
0.0164,
0.0033,
0,
0.66... | [
"\"\"\" An example solver for the circular patch of fluid \"\"\"",
"import numpy",
"from optparse import OptionGroup, Option",
"import pysph.base.api as base",
"import pysph.sph.api as sph",
"from pysph.sph.funcs import stress_funcs",
"from pysph.sph.funcs import eos_funcs",
"from pysph.sph.funcs impo... |
from pysph.sph.sph_calc import SPHCalc
from pysph.sph.funcs.arithmetic_funcs import PropertyGet
import numpy
import logging
logger = logging.getLogger()
#############################################################################
#`Integrator` class
#############################################################################
class Integrator(object):
    """ The base class for all integrators. Currently, the following
    integrators are supported:
    (a) Forward Euler Integrator
    (b) RK2 Integrator
    (c) RK4 Integrator
    (d) Predictor Corrector Integrator
    (e) Leap Frog Integrator
    The integrator operates on a list of SPHCalc objects which define
    the interaction between a single destination particle array and a
    list of source particle arrays.
    An instance of SPHCalc is called a `calc` and thus, the integrator
    operates on a list of calcs. The calcs serve as the functions to
    be evaluated for the integrator.
    A calc can be integrating or non integrating depending on the
    operation it represents. For example, the summation density and
    density rate operations result in calcs that are non integrating
    and integrating respectively. Note that both of them operate on
    the same LHS variable, namely the density.
    Example:
    =========
    Consider a dam break simulation with two particle arrays, fluid
    and boundary. The operations are
    (a) Tait equation (updates=['p','cs'])
    (b) Density Rate (updates=['rho'])
    (c) Momentum equation with avisc (updates = ['u','v'])
    (d) Gravity force (updates = ['u','v'])
    (e) Position Stepping (updates=['x','y'])
    (f) XSPH Correction (updates=['x','y'])
    Integration of this system relies on the use of two dictionaries:
    (1) initial_properties
    The initial_properties for the integrator would look like:
    {
    'fluid': {'x':'_x0', 'y':'_y0', 'u':'_u0', 'v':'_v0', ...},
    'boundary':{'rho':'_rho0'}
    }
    that is, the initial_properties serves as a mapping between names
    of particle properties that need to be stepped and their initial
    values, per particle array. This is needed for multi-step
    integrators since the final step is with respect to the
    initial properties. The initial_properties is used to save out the
    properties once at the start of the integration step.
    (2) step_props
    The step_props dictionary looks like:
    {
    'fluid':{1:{ 'x':['_x0', '_a_x_1'], 'rho':['_rho0', '_a_rho_1'] ... }
    'boundary':{1: {'rho':['_rho0', '_a_rho_1']} }
    }
    that is, for each stage of the integration (k1, k2..) a dictionary
    is stored. This dictionary is keyed on the property to be stepped
    and has as value, a list of two strings. The first string is the
    name of the initial array for this property to be stepped and the
    second is the name of the variable in which the acceleration for
    this property is stored.
    The initial_properties and step_props dicts are constructed at
    setup time while examining the calcs for their update
    properties. A single acceleration variable is used for each
    property that needs to be stepped.
    The naming convention for the acceleration variable is
    '_a_<prop_name>_<stage>', thus, the acceleration variable for
    velocity at the 2nd stage of an integrator would be '_a_u_2'
    Using these two dictionaries, a typical step for the integrator is
    the following:
    (a) Save Initial Arrays:
    ------------------------
    This is easily done using the initial_properties dict. A call is
    made to the particle array to copy over the values as represented
    by the mapping.
    (b) Reset Accelerations.
    ------------------------
    Since one acceleration variable is used per property to be
    stepped, the accelerations must be set to zero before the eval
    phase of the integrator. This is because the accelerations will be
    appended at each call to the underlying SPHFunction.
    (c) Evaluate the RHS.
    -----------------------
    Each calc calls its eval method with appropriate arguments to
    store the results of the evaluation.
    For integrating calcs, the argument is the acceleration variable
    for that property and that stage of the integration. This is where
    the step_props dict comes in.
    For non integrating calcs, the argument are the update properties
    for that calc.
    (d) Step
    ---------
    Once the calcs have been evaluated in order, the accelerations are
    stored in the appropriate variables for each particle array.
    Using the step_props dict, we can step the properties for that
    stage.
    (e) Update particles
    ---------------------
    Typically, the positions of the particles will be updated in (d)
    and this means that the indexing scheme is outdated. This
    necessitates an update to recompute the neighbor information.
    """
    def __init__(self, particles=None, calcs=[], pcalcs = []):
        # NOTE(review): mutable default arguments are shared across
        # calls; safe only while callers never mutate them in place.
        self.particles = particles
        # the calcs used for the RHS evaluations
        self.calcs = calcs
        # the number of steps for the integrator. Typically equal to
        # the number of k arrays required.
        self.nsteps = 1
        # counter for the current stage of the integrator.
        self.cstep = 1
        # global and local time
        self.time = 0.0
        self.local_time = 0.0
        # list of particle properties to be updated across processors.
        self.rupdate_list = []
        # mapping between names of step properties and accelerations
        # per stage, per particle array
        self.step_props = {}
        # mapping between step prop name and its initial prop name
        # per particle array
        self.initial_properties = {}
        # store the velocity accelerations per array per stage
        self.velocity_accelerations = {}
    def set_rupdate_list(self):
        """ Generate the remote update list.
        The format of this list is tied to ParallelCellManager.
        """
        for i in range(len(self.particles.arrays)):
            self.rupdate_list.append([])
    def setup_integrator( self ):
        """ Setup the integrator.
        This function sets up the initial_properties and step_props
        dicts which are used extensively for the integration.
        A non-integrating calc is used to update the property of some
        variable as a function of other variables ( eg p = f(rho)
        ).
        An integrating calc computes the accelerations for some LHS
        property.
        During the eval phase, a calc must pass in a string defining
        the output arrays to append the RHS result to. For a
        non-integrating calc this is simply the calc's update
        property. For an integrating calc, the arguments must be the
        accelerations for that property.
        """
        # save the arrays for easy reference
        self.arrays = self.particles.arrays
        # initialize the step_props and initial_properties.
        for array in self.arrays:
            self.step_props[array.name] = {}
            self.initial_properties[array.name] = {}
            # Initialize the velocity accelerations dict per array
            self.velocity_accelerations[array.name] = {}
            # step props needs a dict per stage of the integration as well
            for k in range(self.nsteps):
                k_num = k + 1
                self.step_props[array.name][k_num] = {}
                self.velocity_accelerations[array.name][k_num] = {}
        for calc in self.calcs:
            # get the destination particle array for the calc
            dest = calc.dest
            updates = calc.updates
            nupdates = len(updates)
            # the initial properties and accelerations need to be
            # defined in the case of integrating calcs
            if calc.integrates:
                for j in range(nupdates):
                    update_prop = updates[j]
                    # define and add the property to the destination array
                    initial_prop = '_' + update_prop + '0'
                    dest.add_property( {"name":initial_prop} )
                    # save the initial property
                    self.initial_properties[dest.name][update_prop]=initial_prop
                    # an acceleration needs to be defined for every stage.
                    for k in range(self.nsteps):
                        k_num = k + 1
                        # define and add the acceleration variable
                        step_prop = '_a_' + update_prop + '_' + str(k_num)
                        dest.add_property( {"name":step_prop} )
                        # save the acceleration variable
                        self.step_props[dest.name][k_num][update_prop] = \
                            [initial_prop, step_prop]
                        # tell the calc to use this acceleration
                        # variable as the argument for the eval phase
                        dst_writes = calc.dst_writes.get(k_num)
                        if not dst_writes:
                            calc.dst_writes[k_num] = []
                        calc.dst_writes[k_num].append( step_prop )
        self.set_rupdate_list()
    def reset_accelerations(self, step):
        """ Reset the accelerations.
        Parameters:
        -----------
        step : int
            The stage of the integrator for which to reset the accelerations.
        """
        for array in self.arrays:
            zeros = numpy.zeros( array.get_number_of_particles() )
            for step_prop in self.step_props[ array.name ][step]:
                acc_prop = self.step_props[array.name][step][step_prop][1]
                array.set(**{acc_prop:zeros} )
    def save_initial_arrays(self):
        """ Save the initial arrays. """
        for array in self.arrays:
            array.copy_over_properties( self.initial_properties[array.name] )
    def eval(self):
        """ Evaluate the LHS as defined by the calcs.
        For evaluations that are time dependant, we rely on the
        integrator's local time variable to determine what time we're
        at.
        As an example, an RK2 integrator would perform two evaluations:
        K1 is evaluated at self.local_time = self.time
        K2 is evaluated at self.local_time = self.time + dt/2
        It is the responsibility of the integrator's `integrate`
        method to update the local time variable used by `eval`
        """
        calcs = self.calcs
        ncalcs = len(calcs)
        particles = self.particles
        # current stage of the integration selects which acceleration
        # variables the integrating calcs write into
        k_num = self.cstep
        for i in range(ncalcs):
            calc = calcs[i]
            # set the time for the destination particle array
            calc.dest.set_time(self.local_time)
            # Evaluate the calc
            if calc.integrates:
                if calc.tensor_eval:
                    calc.tensor_sph( *calc.dst_writes[k_num] )
                else:
                    calc.sph( *calc.dst_writes[k_num] )
            else:
                calc.sph( *calc.updates )
                # ensure all processes have reached this point
                particles.barrier()
                # update the properties for remote particles
                self.rupdate_list[calc.dnum] = [calc.updates]
                particles.update_remote_particle_properties(
                    self.rupdate_list)
        # ensure that all processors have evaluated the RHS's
        # not likely that this is necessary.
        particles.barrier()
    def step(self, dt):
        """ Step the particle properties. """
        # get the current stage of the integration
        k_num = self.cstep
        for array in self.arrays:
            # get the mapping for this array and this stage
            to_step = self.step_props[ array.name ][k_num]
            for prop in to_step:
                initial_prop = to_step[ prop ][0]
                step_prop = to_step[ prop ][1]
                initial_arr = array.get( initial_prop )
                step_arr = array.get( step_prop )
                updated_array = initial_arr + step_arr * dt
                array.set( **{prop:updated_array} )
                # store the acceleration arrays
                if prop in ['u','v','w']:
                    self.velocity_accelerations[array.name][k_num][step_prop] = step_arr
        # Increment the step by 1
        self.cstep += 1
    def get_max_acceleration(self, array, solver):
        """Return the maximum stage-1 velocity-acceleration magnitude
        for *array* (used for force-based time stepping).
        NOTE(review): on the very first iteration (solver.count == 1)
        this returns solver.dt, not an acceleration.
        """
        if solver.count == 1:
            return solver.dt
        if not ( array in self.arrays ):
            raise RuntimeError("Array %s does not belong to me "%array.name)
        acc = -numpy.inf
        # has_key: Python 2 dict API
        if array.properties.has_key("_a_u_1"):
            dim = solver.dim
            if dim == 1:
                ax = self.step_props[array.name][1]['u'][1]
                k1_x = self.velocity_accelerations[array.name][1][ax]
                acc = max( acc, numpy.max(numpy.abs(k1_x)) )
            elif dim == 2:
                ax = self.step_props[array.name][1]['u'][1]
                k1_x = self.velocity_accelerations[array.name][1][ax]
                ay = self.step_props[array.name][1]['v'][1]
                k1_y = self.velocity_accelerations[array.name][1][ay]
                acc = max( acc, numpy.max(numpy.sqrt(k1_x*k1_x +\
                                                     k1_y*k1_y)) )
            elif dim == 3:
                ax = self.step_props[array.name][1]['u'][1]
                k1_x = self.velocity_accelerations[array.name][1][ax]
                ay = self.step_props[array.name][1]['v'][1]
                k1_y = self.velocity_accelerations[array.name][1][ay]
                az = self.step_props[array.name][1]['w'][1]
                k1_z = self.velocity_accelerations[array.name][1][az]
                acc = max( acc,
                           numpy.max(numpy.sqrt(k1_x*k1_x + \
                                                k1_y*k1_y + \
                                                k1_z*k1_z)) )
        return acc
    def integrate(self, dt):
        """Advance the system by dt; concrete subclasses implement this."""
        raise NotImplementedError
##############################################################################
#`EulerIntegrator` class
##############################################################################
class EulerIntegrator(Integrator):
    """ Euler integration of the system X' = F(X) with the formula:
    X(t + h) = X + h*F(X)
    """
    def __init__(self, particles, calcs):
        Integrator.__init__(self, particles, calcs)
        self.nsteps = 1
    def integrate(self, dt):
        """Perform one forward-Euler step of size dt."""
        # set the initial properties
        self.save_initial_arrays()    # X0 = X(t)
        # Euler step
        self.reset_accelerations(step=1)
        # set the local time to the integrator's time
        self.local_time = self.time
        self.eval()    # F(X) = k1
        self.step( dt )    # X(t + h) = X0 + h*k1
        self.particles.update()
        self.cstep = 1
##############################################################################
#`RK2Integrator` class
##############################################################################
class RK2Integrator(Integrator):
    """ RK2 Integration for the system X' = F(X) with the formula:
    # Stage 1
    K1 = F(X)
    X(t + h/2) = X0 + h/2*K1
    # Stage 2
    K1 = F( X(t+h/2) )
    X(t + h) = X0 + h * K1
    """
    def __init__(self, particles, calcs):
        Integrator.__init__(self, particles, calcs)
        # both stages reuse the stage-1 acceleration slots (cstep is
        # reset to 1 between stages in integrate)
        self.nsteps = 1
    def integrate(self, dt):
        """Perform one RK2 (midpoint) step of size dt."""
        # set the initial arrays
        self.save_initial_arrays()    # X0 = X(t)
        #############################################################
        # Stage 1
        #############################################################
        self.reset_accelerations(step=1)
        # set the local time to the integrator's time
        self.local_time = self.time
        self.eval()    # K1 = F(X)
        self.step(dt/2)    # F(X+h/2) = X0 + h/2*K1
        self.particles.update()
        self.cstep = 1
        #############################################################
        # Stage 2
        #############################################################
        self.reset_accelerations(step=1)
        # update the local time
        self.local_time = self.time + dt/2
        self.eval()    # K1 = F( X(t+h/2) )
        self.step(dt)    # F(X+h) = X0 + h*K1
        self.particles.update()
        self.cstep = 1
##############################################################################
#`RK4Integrator` class
##############################################################################
class RK4Integrator(Integrator):
    """ RK4 Integration of a system X' = F(X) using the scheme
    # Stage 1
    K1 = F(X)
    X(t + h/2) = X0 + h/2*K1
    # Stage 2
    K2 = F( X(t + h/2) )
    X(t + h/2) = X0 + h/2*K2
    # Stage 3
    K3 = F( X(t + h/2) )
    X(t + h) = X0 + h*K3
    # Stage 4
    K4 = F( X(t + h) )
    X(t + h) = X0 + h/6 * ( K1 + 2*K2 + 2*K3 + K4 )
    """
    def __init__(self, particles, calcs):
        Integrator.__init__(self, particles, calcs)
        # four stages, each with its own acceleration storage
        self.nsteps = 4
    def final_step(self, dt):
        """ Perform the final step for RK4 integration """
        fac = 1.0/6.0
        for array in self.arrays:
            to_step_k1 = self.step_props[array.name][1]
            to_step_k2 = self.step_props[array.name][2]
            to_step_k3 = self.step_props[array.name][3]
            to_step_k4 = self.step_props[array.name][4]
            for prop in to_step_k1:
                initial_array = array.get( to_step_k1[prop][0] )
                k1_array = array.get( to_step_k1[prop][1] )
                k2_array = array.get( to_step_k2[prop][1] )
                k3_array = array.get( to_step_k3[prop][1] )
                k4_array = array.get( to_step_k4[prop][1] )
                # weighted RK4 combination of the four slopes
                updated_array = initial_array + fac*dt*(k1_array + \
                                                        2*k2_array + \
                                                        2*k3_array + \
                                                        k4_array)
                array.set( **{prop:updated_array} )
    def integrate(self, dt):
        """Perform one classical RK4 step of size dt.
        Note: cstep is advanced implicitly by step() between stages.
        """
        # save the initial arrays
        self.save_initial_arrays()    # X0 = X(t)
        #############################################################
        # Stage 1
        #############################################################
        self.reset_accelerations(step=1)
        # set the local time to the integrator's time
        self.local_time = self.time
        self.eval()    # K1 = F(X)
        self.step(dt/2)    # X(t + h/2) = X0 + h/2*K1
        self.particles.update()
        #############################################################
        # Stage 2
        #############################################################
        self.reset_accelerations(step=2)
        # update the local time
        self.local_time = self.time + dt/2
        self.eval()    # K2 = F( X(t+h/2) )
        self.step(dt/2)    # X(t+h/2) = X0 + h/2*K2
        self.particles.update()
        #############################################################
        # Stage 3
        #############################################################
        self.reset_accelerations(step=3)
        # update the local time
        self.local_time = self.time + dt/2
        self.eval()    # K3 = F( X(t+h/2) )
        self.step(dt)    # X(t+h) = X0 + h*K3
        self.particles.update()
        #############################################################
        # Stage 4
        #############################################################
        self.reset_accelerations(step=4)
        # update the local_time
        self.local_time = self.time + dt
        self.eval()    # K4 = F( X(t+h) )
        self.final_step(dt)    # X(t + h) = X0 + h/6(K1 + 2K2 + 2K3 + K4)
        self.particles.update()
        # reset the step counter
        self.cstep = 1
##############################################################################
#`PredictorCorrectorIntegrator` class
##############################################################################
class PredictorCorrectorIntegrator(Integrator):
    """ Predictor Corrector Integration of a system X' = F(X) using the scheme
    Predict:
    X(t + h/2) = X0 + h/2 * F(X)
    Correct:
    X(t + h/2) = X0 + h/2 * F( X(t + h/2) )
    Step:
    X(t + h) = 2*X(t + h/2) - X0
    """
    def __init__(self, particles, calcs):
        Integrator.__init__(self, particles, calcs)
        # predict and correct phases share the stage-1 acceleration slots
        self.nsteps = 1
    def final_step(self):
        """ Perform the final step in the PC integration method """
        for array in self.arrays:
            to_step = self.step_props[array.name][1]
            for prop in to_step:
                current_array = array.get( prop )
                initial_array = array.get( to_step[prop][0] )
                # X(t+h) = 2*X(t+h/2) - X0
                updated_array = 2*current_array - initial_array
                array.set( **{prop:updated_array} )
    def integrate(self, dt):
        """Perform one predictor-corrector step of size dt."""
        # save the initial arrays
        self.save_initial_arrays()    # X0 = X(t)
        ############################################################
        # Predict
        ############################################################
        self.reset_accelerations(step=1)
        # set the local time to the integrator's time
        self.local_time = self.time
        self.eval()    # K1 = F(X)
        self.step(dt/2)    # X(t+h/2) = X0 + h/2*K1
        self.particles.update()
        self.cstep = 1
        ##############################################################
        # Correct
        ##############################################################
        self.reset_accelerations(step=1)
        # update the local time
        self.local_time = self.time + dt/2
        self.eval()    # K1 = F( X(t+h/2) )
        self.step(dt/2)    # X(t+h/2) = X0 + h/2*K1
        self.particles.update()
        ##############################################################
        # Step
        ##############################################################
        self.final_step()    # X(t+h) = 2*X(t+h/2) - X0
        self.particles.update()
        self.cstep = 1
##############################################################################
#`LeapFrogIntegrator` class
##############################################################################
class LeapFrogIntegrator(Integrator):
    """ Leap frog integration of a system :
    \frac{Dv}{Dt} = F
    \frac{Dr}{Dt} = v
    \frac{D\rho}{Dt} = D
    the prediction step:
    vbar = v_0 + h * F_0
    r = r_0 + h*v_0 + 0.5 * h * h * F_0
    rhobar = rho_0 + h * D_0
    correction step:
    v = vbar + 0.5*h*(F - F_0)
    rho = rhobar + 0.5*h*(D - D_0)
    NOTE(review): this class references attributes and methods
    (icalcs, pcalcs, ncalcs, hcalcs, k_props, set_initial_arrays,
    do_step) that the Integrator base class in this file does not
    define, and calls self.eval(self.ncalcs) although the base eval()
    takes no arguments -- confirm against the rest of the package
    before use.
    """
    def __init__(self, particles, calcs):
        Integrator.__init__(self, particles, calcs)
        self.nsteps = 2
    def add_correction_for_position(self, dt):
        """Add the 0.5*h*h*F_0 term to the positions stepped by the
        position calc, using the stage-1 velocity accelerations."""
        ncalcs = len(self.icalcs)
        pos_calc = self.pcalcs[0]
        pos_calc_pa = self.arrays[pos_calc.dnum]
        pos_calc_updates = pos_calc.updates
        for calc in self.icalcs:
            if calc.tag == "velocity":
                pa = calc.dest
                updates = calc.updates
                for j in range(calc.nupdates):
                    update_prop = pos_calc_updates[j]
                    #k1_prop = self.k1_props['k1'][calc.id][j]
                    k1_prop = self.k_props[calc.id]['k1'][j]
                    # the current position
                    current_arr = pa.get(update_prop)
                    step_array = pa.get(k1_prop)
                    updated_array = current_arr + 0.5*dt*dt*step_array
                    pos_calc_pa.set(**{update_prop:updated_array})
    def final_step(self, calc, dt):
        """Apply the leap-frog correction 0.5*h*(K2 - K1) to the
        properties updated by *calc*."""
        #pa = self.arrays[calc.dnum]
        pa = calc.dest
        updates = calc.updates
        for j in range(len(updates)):
            update_prop = updates[j]
            k1_prop = self.k_props[calc.id]['k1'][j]
            k2_prop = self.k_props[calc.id]['k2'][j]
            k1_array = pa.get(k1_prop)
            k2_array = pa.get(k2_prop)
            current_array = pa.get(update_prop)
            updated_array = current_array + 0.5*dt*(k2_array - k1_array)
            pa.set(**{update_prop:updated_array})
    def integrate(self, dt):
        """Perform one leap-frog (predict/correct) step of size dt."""
        # set the initial arrays
        self.set_initial_arrays()
        # eval and step the non position calcs at the current state
        self.do_step(self.ncalcs, dt)
        self.cstep = 1
        # eval and step the position calcs
        self.do_step(self.pcalcs, dt)
        # add correction for the positions
        self.add_correction_for_position(dt)
        #for calc in self.hcalcs:
        #    calc.sph('h')
        # ensure all processors have reached this point, then update
        self.particles.barrier()
        self.particles.update()
        # eval and step the non position calcs
        self.eval(self.ncalcs)
        for calc in self.icalcs:
            self.final_step(calc, dt)
        self.cstep = 1
##############################################################################
#`GSPHIntegrator` class
##############################################################################
class GSPHIntegrator(EulerIntegrator):
    """ Euler integration of the system X' = F(X) with the formula:
    X(t + h) = X + h*F(X)
    In addition, for velocity properties a half-step value
    (e.g. 'ustar') is stored: star = X0 + 0.5*h*F(X).
    """
    def step(self, dt):
        """ Step the particle properties. """
        # get the current stage of the integration
        k_num = self.cstep
        for array in self.arrays:
            # get the mapping for this array and this stage
            to_step = self.step_props[ array.name ][k_num]
            for prop in to_step:
                initial_prop = to_step[ prop ][0]
                step_prop = to_step[ prop ][1]
                initial_arr = array.get( initial_prop )
                step_arr = array.get( step_prop )
                updated_array = initial_arr + step_arr * dt
                array.set( **{prop:updated_array} )
                # store the acceleration arrays
                if prop in ['u','v','w']:
                    self.velocity_accelerations[array.name][k_num][step_prop] = step_arr
                    vstar = prop + "star"
                    # NOTE(review): the fetched value is immediately
                    # overwritten below; the get() appears redundant.
                    star = array.get(vstar)
                    star = initial_arr + 0.5 * step_arr*dt
                    array.set( **{vstar:star})
        # Increment the step by 1
        self.cstep += 1
###########################################################################
# Registry of available integrators keyed by name.  Note that
# GSPHIntegrator is not listed here.
integration_methods = [('Euler', EulerIntegrator),
                       ('LeapFrog', LeapFrogIntegrator),
                       ('RK2', RK2Integrator),
                       ('RK4', RK4Integrator),
                       ('PredictorCorrector', PredictorCorrectorIntegrator),
                       ]
| [
[
1,
0,
0.0012,
0.0012,
0,
0.66,
0,
79,
0,
1,
0,
0,
79,
0,
0
],
[
1,
0,
0.0024,
0.0012,
0,
0.66,
0.0833,
653,
0,
1,
0,
0,
653,
0,
0
],
[
1,
0,
0.0047,
0.0012,
0,
0.... | [
"from pysph.sph.sph_calc import SPHCalc",
"from pysph.sph.funcs.arithmetic_funcs import PropertyGet",
"import numpy",
"import logging",
"logger = logging.getLogger()",
"class Integrator(object):\n \"\"\" The base class for all integrators. Currently, the following \n integrators are supported:\n ... |
import numpy
class TimeStep(object):
def compute_time_step(self, solver):
return solver.dt
class ViscousTimeStep(TimeStep):
def __init__(self, cfl, co, particles):
self.cfl = cfl
self.co = co
self.particles = particles
def compute_time_step(self, solver):
cfl = self.cfl
co = self.co
# take dt to be some large value
dt = 1
arrays = self.particles.arrays
for array in arrays:
if array.properties.has_key('dt_fac'):
h, dt_fac = array.get('h','dt_fac')
_dt = numpy.min( cfl * h/(co + numpy.max(dt_fac)) )
# choose the minimum time step from all arrays
dt = min( _dt, dt )
return dt
class ViscousAndForceBasedTimeStep(ViscousTimeStep):
def compute_time_step(self, solver):
# compute the time step based on the viscous criterion
dt = ViscousTimeStep.compute_time_step(self, solver)
# compute the acceleration based time step
integrator = solver.integrator
arrays = self.particles.arrays
for array in arrays:
if array.properties.has_key("_a_u_1"):
fmax = integrator.get_max_acceleration(array, solver)
h = array.get("h")
_dt = self.cfl * numpy.min( numpy.sqrt(h/fmax) )
dt = min( dt, _dt )
return dt
class VelocityBasedTimeStep(object):
def __init__(self, particles, cfl=0.3,):
self.cfl = cfl
self.particles = particles
def compute_time_step(self, solver):
v = float('inf')
for pa in solver.particles.arrays:
val = min(pa.h/(pa.cs+(pa.u**2+pa.v**2+pa.w**2)**0.5))
if val < v:
v = val
return self.cfl*v
| [
[
1,
0,
0.0141,
0.0141,
0,
0.66,
0,
954,
0,
1,
0,
0,
954,
0,
0
],
[
3,
0,
0.0634,
0.0563,
0,
0.66,
0.25,
373,
0,
1,
0,
0,
186,
0,
0
],
[
2,
1,
0.0775,
0.0282,
1,
0.... | [
"import numpy",
"class TimeStep(object):\n\n def compute_time_step(self, solver):\n return solver.dt",
" def compute_time_step(self, solver):\n return solver.dt",
" return solver.dt",
"class ViscousTimeStep(TimeStep):\n\n def __init__(self, cfl, co, particles):\n self.cf... |
from integrator import Integrator
from cl_utils import HAS_CL, get_pysph_root, get_cl_include,\
get_scalar_buffer, cl_read, get_real, enqueue_copy
if HAS_CL:
import pyopencl as cl
from os import path
import numpy
class CLIntegrator(Integrator):
def setup_integrator(self, context):
""" Setup the additional particle arrays for integration.
Parameters:
-----------
context -- the OpenCL context
setup_cl on the calcs must be called when all particle
properties on the particle array are created. This is
important as all device buffers will created.
"""
Integrator.setup_integrator(self)
self.setup_cl(context)
self.cl_precision = self.particles.get_cl_precision()
#self.step_props = ['_tmpx', '_tmpy', '_tmpz']
def setup_cl(self, context):
""" OpenCL setup """
self.context = context
for calc in self.calcs:
calc.setup_cl(context)
# setup the OpenCL Program
root = get_pysph_root()
src = cl_read(path.join(root, 'solver/integrator.cl'),
self.particles.get_cl_precision())
self.program = cl.Program(context, src).build(get_cl_include())
def reset_accelerations(self, step):
for array in self.arrays:
queue = array.queue
np = array.get_number_of_particles()
to_step = self.step_props[array.name][step]
for prop in to_step:
acc_prop = to_step[prop][1]
acc_buffer = array.get_cl_buffer( acc_prop )
self.program.set_to_zero(queue, (np,), (1,), acc_buffer).wait()
def save_initial_arrays(self):
""" Set the initial arrays for each calc
The initial array is the update property of a calc appended with _0
Note that multiple calcs can update the same property and this
will not replicate the creation of the initial arrays.
In OpenCL, we call the EnqueueCopyBuffer with source as the
current update property and destination as the initial
property array.
"""
for array in self.arrays:
queue = array.queue
initial_props = self.initial_properties[ array.name ]
for prop in initial_props:
src = array.get_cl_buffer( prop )
dst = array.get_cl_buffer( initial_props[prop] )
enqueue_copy(queue=queue, src=src, dst=dst)
# ncalcs = len(calcs)
# for i in range(ncalcs):
# calc = calcs[i]
# queue = calc.queue
# if calc.integrates:
# updates = calc.updates
# nupdates = len(updates)
# pa = self.arrays[calc.dnum]
# for j in range(nupdates):
# update_prop = updates[j]
# initial_prop = self.initial_props[calc.id][j]
# update_prop_buffer = pa.get_cl_buffer(update_prop)
# initial_prop_buffer = pa.get_cl_buffer(initial_prop)
# enqueue_copy(queue=queue, src=update_prop_buffer,
# dst=initial_prop_buffer)
# def reset_current_buffers(self, calcs):
# """ Reset the current arrays """
# ncalcs = len(calcs)
# for i in range(ncalcs):
# calc = calcs[i]
# queue = calc.queue
# if calc.integrates:
# updates = calc.updates
# nupdates = len(updates)
# pa = self.arrays[calc.dnum]
# for j in range(nupdates):
# update_prop = updates[j]
# initial_prop = self.initial_props[calc.id][j]
# # get the device buffers
# update_prop_buffer = pa.get_cl_buffer(update_prop)
# initial_prop_buffer = pa.get_cl_buffer(initial_prop)
# # reset the current property to the initial array
# enqueue_copy(queue=queue,src=initial_prop_buffer,
# dst=update_prop_buffer)
def eval(self):
""" Evaluate each calc and store in the k list if necessary """
calcs = self.calcs
ncalcs = len(calcs)
particles = self.particles
k_num = self.cstep
for i in range(ncalcs):
calc = calcs[i]
queue = calc.queue
updates = calc.updates
nupdates = calc.nupdates
# get the destination particle array for this calc
pa = dest = calc.dest
#print "Evaluating calc:: ", calc.id
if calc.integrates:
calc.sph( *calc.dst_writes[k_num] )
else:
calc.sph( *calc.updates )
#particles.barrier()
#self.rupdate_list[calc.dnum] = [update_prop]
#particles.update_remote_particle_properties(
# self.rupdate_list)
#ensure that the eval phase is completed for all processes
particles.barrier()
def step(self, dt):
""" Perform stepping for the integrating calcs """
cl_dt = get_real(dt, self.cl_precision)
# get the current stage of the integration
k_num = self.cstep
for array in self.arrays:
# get the number of particles
np = array.get_number_of_particles()
# get the command queue for the array
queue = array.queue
# get the mapping for this array and this stage
to_step = self.step_props[ array.name ][k_num]
for prop in to_step:
initial_prop = to_step[ prop ][0]
step_prop = to_step[ prop ][1]
prop_buffer = array.get_cl_buffer( prop )
step_buffer = array.get_cl_buffer( step_prop )
initial_buffer = array.get_cl_buffer( initial_prop )
self.program.step_array(queue, (np,1,1), (1,1,1),
initial_buffer, step_buffer,
prop_buffer, cl_dt)
self.cstep += 1
# for i in range(ncalcs):
# calc = calcs[i]
# queue = calc.queue
# if calc.integrates:
# updates = calc.updates
# nupdates = calc.nupdates
# # get the destination particle array for this calc
# pa = self.arrays[calc.dnum]
# np = pa.get_number_of_particles()
# for j in range(nupdates):
# update_prop = updates[j]
# k_prop = self.k_props[calc.id][k_num][j]
# current_buffer = pa.get_cl_buffer(update_prop)
# step_buffer = pa.get_cl_buffer(k_prop)
# tmp_buffer = pa.get_cl_buffer('_tmpx')
# self.program.step_array(queue, (np,1,1), (1,1,1),
# current_buffer, step_buffer,
# tmp_buffer, cl_dt)
# enqueue_copy(queue, src=tmp_buffer,
# dest=current_buffer)
# pass
# pass
# # Increment the step by 1
# self.cstep += 1
##############################################################################
#`CLEulerIntegrator` class
##############################################################################
class CLEulerIntegrator(CLIntegrator):
""" Euler integration of the system X' = F(X) with the formula:
X(t + h) = X + h*F(X)
"""
def __init__(self, particles, calcs):
CLIntegrator.__init__(self, particles, calcs)
self.nsteps = 1
def integrate(self, dt):
# set the initial buffers
self.save_initial_arrays()
# Euler step
self.reset_accelerations(step=1)
self.eval()
self.step(dt)
self.particles.update()
self.cstep = 1
##############################################################################
#`CLRK2Integrator` class
##############################################################################
class CLRK2Integrator(CLIntegrator):
""" RK2 Integration for the system X' = F(X) with the formula:
# Stage 1
K1 = F(X)
X(t + h/2) = X0 + h/2*K1
# Stage 2
K1 = F( X(t+h/2) )
X(t + h) = X0 + h * K1
"""
def __init__(self, particles, calcs):
CLIntegrator.__init__(self, particles, calcs)
self.nsteps = 1
def integrate(self, dt):
# set the initial arrays
self.save_initial_arrays() # X0 = X(t)
#############################################################
# Stage 1
#############################################################
self.reset_accelerations(step=1)
self.eval() # K1 = F(X)
self.step(dt/2) # F(X+h/2) = X0 + h/2*K1
self.particles.update()
self.cstep = 1
#############################################################
# Stage 2
#############################################################
self.reset_accelerations(step=1)
self.eval() # K1 = F( X(t+h/2) )
self.step(dt) # F(X+h) = X0 + h*K1
self.particles.update()
self.cstep = 1
##############################################################################
#`CLPredictorCorrectorIntegrator` class
##############################################################################
class CLPredictorCorrectorIntegrator(CLIntegrator):
""" Predictor Corrector Integration of a system X' = F(X) using the scheme
Predict:
X(t + h/2) = X0 + h/2 * F(X)
Correct:
X(t + h/2) = X0 + h/2 * F( X(t + h/2) )
Step:
X(t + h) = 2*X(t + h/2) - X0
"""
def __init__(self, particles, calcs):
CLIntegrator.__init__(self, particles, calcs)
self.nsteps = 1
def final_step(self):
for array in self.arrays:
to_step = self.step_props[array.name][1]
for prop in to_step:
current_buffer = array.get_cl_buffer( prop )
initial_buffer = array.get_cl_buffer( to_step[prop][0] )
self.program.pc_final_step( queue, (np,), (1,),
current_buffer,
initial_buffer).wait()
def integrate(self, dt):
# save the initial arrays
self.save_initial_arrays() # X0 = X(t)
############################################################
# Predict
############################################################
self.reset_accelerations(step=1)
self.eval() # K1 = F(X)
self.step(dt/2) # X(t+h/2) = X0 + h/2*K1
self.particles.update()
self.cstep = 1
##############################################################
# Correct
##############################################################
self.reset_accelerations(step=1)
self.eval() # K1 = F( X(t+h/2) )
self.step(dt/2) # X(t+h/2) = X0 + h/2*K1
self.particles.update()
##############################################################
# Step
##############################################################
self.final_step(dt) # X(t+h) = 2*X(t+h/2) - X0
self.particles.update()
self.cstep = 1
| [
[
1,
0,
0.0026,
0.0026,
0,
0.66,
0,
352,
0,
1,
0,
0,
352,
0,
0
],
[
1,
0,
0.0065,
0.0052,
0,
0.66,
0.125,
842,
0,
7,
0,
0,
842,
0,
0
],
[
4,
0,
0.0144,
0.0052,
0,
0... | [
"from integrator import Integrator",
"from cl_utils import HAS_CL, get_pysph_root, get_cl_include,\\\n get_scalar_buffer, cl_read, get_real, enqueue_copy",
"if HAS_CL:\n import pyopencl as cl",
" import pyopencl as cl",
"from os import path",
"import numpy",
"class CLIntegrator(Integrator):\n\... |
""" Post step functions for the solver """
import pickle
import os
import pysph.base.api as base
from pysph.base.cell import py_find_cell_id
class SaveCellManagerData(object):
"""Post-step function to save the cell manager's data.
Two files are created, 'neighbors' contains partile neighbor
information as returned by the neighbor locator. For each
particle, a LongArray for it's neighbor indices are stored.
The second file
'cells', holds cell data for each cell (partilce indices, coordinates)
"""
def __init__(self, rank = 0, path=None, count=10):
self.rank = rank
self.count = count
if path:
self.path = path
else:
self.path = "."
def eval(self, solver):
if not ((solver.count % self.count) == 0):
return
particles = solver.particles
time = solver.t
nnps = particles.nnps_manager
locator_cache = nnps.particle_locator_cache
num_locs = len(locator_cache)
locators = locator_cache.values()
fname_base = os.path.join(self.path+"/neighbors_"+str(self.rank))
cell_manager = particles.cell_manager
cell_size = cell_manager.cell_size
neighbor_idx = {}
for i in range(num_locs):
loc = locators[i]
dest = loc.dest
src = loc.source
particle_indices = dest.get('idx')
x, y, z = dest.get("x", "y", "z")
neighbor_idx[dest.name + '-' + src.name] = {}
d = neighbor_idx[dest.name + '-' + src.name]
nrp = dest.num_real_particles
for j in range(nrp):
neighbors = loc.py_get_nearest_particles(j)
temp = dest.extract_particles(neighbors)
particle_idx = particle_indices[j]
pnt = base.Point(x[j], y[j], z[j])
cid = py_find_cell_id(pnt, cell_size)
idx = temp.get_carray("idx")
d[particle_idx] = {'neighbors':idx, 'cid':cid}
fname = fname_base + "_" + dest.name + "_" + str(solver.count)
# save particle neighbor information.
f = open(fname, 'w')
pickle.dump(neighbor_idx, f)
f.close()
fname_cells = os.path.join(self.path+"/cells_"+str(self.rank))
fname_cells += "_" + str(solver.count)
# ask the cell manager to save the particle representation
cell_manager.get_particle_representation(fname_cells)
class CFLTimeStepFunction(object):
def __init__(self, CFL=0.3):
self.cfl = CFL
def eval(self, solver):
v = float('inf')
for pa in solver.particles.arrays:
val = min(pa.h/(pa.cs+(pa.u**2+pa.v**2+pa.w**2)**0.5))
if val < v:
v = val
solver.dt = self.cfl*v
| [
[
8,
0,
0.0097,
0.0097,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.0291,
0.0097,
0,
0.66,
0.1667,
848,
0,
1,
0,
0,
848,
0,
0
],
[
1,
0,
0.0388,
0.0097,
0,
0.66... | [
"\"\"\" Post step functions for the solver \"\"\"",
"import pickle",
"import os",
"import pysph.base.api as base",
"from pysph.base.cell import py_find_cell_id",
"class SaveCellManagerData(object):\n \"\"\"Post-step function to save the cell manager's data.\n\n Two files are created, 'neighbors' con... |
# Standard imports.
import logging, os
from optparse import OptionParser, OptionGroup, Option
from os.path import basename, splitext, abspath
import sys
from utils import mkdir
# PySPH imports.
from pysph.base.particles import Particles, CLParticles, ParticleArray
from pysph.solver.controller import CommandManager
from pysph.solver.integrator import integration_methods
from pysph.base.nnps import NeighborLocatorType as LocatorType
import pysph.base.kernels as kernels
# MPI conditional imports
HAS_MPI = True
try:
from mpi4py import MPI
except ImportError:
HAS_MPI = False
else:
from pysph.parallel.load_balancer import LoadBalancer
from pysph.parallel.simple_parallel_manager import \
SimpleParallelManager
from pysph.parallel.parallel_cell import ParallelCellManager
from pysph.parallel.simple_block_manager import SimpleBlockManager
def list_option_callback(option, opt, value, parser):
val = value.split(',')
val.extend( parser.rargs )
setattr( parser.values, option.dest, val )
##############################################################################
# `Application` class.
##############################################################################
class Application(object):
""" Class used by any SPH application """
def __init__(self, load_balance=True, fname=None):
""" Constructor
Parameters
----------
load_balance : A boolean which determines if automatic load
balancing is to be performed or not
"""
self._solver = None
self._parallel_manager = None
# The initial distribution method name to pass to the LoadBalancer's
# `distribute_particles` method, can be one of ('auto', 'sfc', 'single'
# etc.)
self._distr_func = 'auto'
self.load_balance = load_balance
if fname == None:
fname = sys.argv[0].split('.')[0]
self.fname = fname
self.args = sys.argv[1:]
# MPI related vars.
self.comm = None
self.num_procs = 1
self.rank = 0
if HAS_MPI:
self.comm = comm = MPI.COMM_WORLD
self.num_procs = comm.Get_size()
self.rank = comm.Get_rank()
self._log_levels = {'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARNING,
'error': logging.ERROR,
'critical': logging.CRITICAL,
'none': None}
self._setup_optparse()
self.path = None
def _setup_optparse(self):
usage = """
%prog [options]
Note that you may run this program via MPI and the run will be
automatically parallelized. To do this run::
$ mpirun -n 4 /path/to/your/python %prog [options]
Replace '4' above with the number of processors you have.
Below are the options you may pass.
"""
parser = OptionParser(usage)
self.opt_parse = parser
# Add some default options.
parser.add_option("-b", "--no-load-balance", action="store_true",
dest="no_load_balance", default=False,
help="Do not perform automatic load balancing "\
"for parallel runs.")
# -v
valid_vals = "Valid values: %s"%self._log_levels.keys()
parser.add_option("-v", "--loglevel", action="store",
type="string",
dest="loglevel",
default='warning',
help="Log-level to use for log messages. " +
valid_vals)
# --logfile
parser.add_option("--logfile", action="store",
type="string",
dest="logfile",
default=None,
help="Log file to use for logging, set to "+
"empty ('') for no file logging.")
# -l
parser.add_option("-l", "--print-log", action="store_true",
dest="print_log", default=False,
help="Print log messages to stderr.")
# --final-time
parser.add_option("--final-time", action="store",
type="float",
dest="final_time",
default=None,
help="Total time for the simulation.")
# --timestep
parser.add_option("--timestep", action="store",
type="float",
dest="time_step",
default=None,
help="Timestep to use for the simulation.")
# -q/--quiet.
parser.add_option("-q", "--quiet", action="store_true",
dest="quiet", default=False,
help="Do not print any progress information.")
# -o/ --output
parser.add_option("-o", "--output", action="store",
dest="output", default=self.fname,
help="File name to use for output")
# --output-freq.
parser.add_option("--freq", action="store",
dest="freq", default=20, type="int",
help="Printing frequency for the output")
# -d/ --detailed-output.
parser.add_option("-d", "--detailed-output", action="store_true",
dest="detailed_output", default=False,
help="Dump detailed output.")
# --directory
parser.add_option("--directory", action="store",
dest="output_dir", default=self.fname+'_output',
help="Dump output in the specified directory.")
# --kernel
parser.add_option("--kernel", action="store",
dest="kernel", type="int",
help="%-55s"%"The kernel function to use:"+
''.join(['%d - %-51s'%(d,s) for d,s in
enumerate(kernels.kernel_names)]))
# --hks
parser.add_option("--hks", action="store_true",
dest="hks", default=True,
help="""Perform the Hrenquist and Katz kernel
normalization for variable smothing lengths.""")
# -k/--kernel-correction
parser.add_option("-k", "--kernel-correction", action="store",
dest="kernel_correction", type="int",
default=-1,
help="""Use Kernel correction.
0 - Bonnet and Lok correction
1 - RKPM first order correction""")
# --integration
parser.add_option("--integration", action="store",
dest="integration", type="int",
help="%-55s"%"The integration method to use:"+
''.join(['%d - %-51s'%(d,s[0]) for d,s in
enumerate(integration_methods)]))
# --cl
parser.add_option("--cl", action="store_true", dest="with_cl",
default=False, help=""" Use OpenCL to run the
simulation on an appropriate device """)
# --parallel-mode
parser.add_option("--parallel-mode", action="store",
dest="parallel_mode", default="simple",
help = """Use 'simple' (which shares all particles)
or 'auto' (which does block based parallel
distribution of particles).""")
# --parallel-output-mode
parser.add_option("--parallel-output-mode", action="store",
dest="parallel_output_mode", default="collected",
help="""Use 'collected' to dump one output at
root or 'distributed' for every processor. """)
# solver interfaces
interfaces = OptionGroup(parser, "Interfaces",
"Add interfaces to the solver")
interfaces.add_option("--interactive", action="store_true",
dest="cmd_line", default=False,
help=("Add an interactive commandline interface "
"to the solver"))
interfaces.add_option("--xml-rpc", action="store",
dest="xml_rpc", metavar='[HOST:]PORT',
help=("Add an XML-RPC interface to the solver; "
"HOST=0.0.0.0 by default"))
interfaces.add_option("--multiproc", action="store",
dest="multiproc", metavar='[[AUTHKEY@]HOST:]PORT[+]',
default="pysph@0.0.0.0:8800+",
help=("Add a python multiprocessing interface "
"to the solver; "
"AUTHKEY=pysph, HOST=0.0.0.0, PORT=8800+ by"
" default (8800+ means first available port "
"number 8800 onwards)"))
interfaces.add_option("--no-multiproc", action="store_const",
dest="multiproc", const=None,
help=("Disable multiprocessing interface "
"to the solver"))
parser.add_option_group(interfaces)
# solver job resume support
parser.add_option('--resume', action='store', dest='resume',
metavar='COUNT|count|?',
help=('Resume solver from specified time (as stored '
'in the data in output directory); count chooses '
'a particular file; ? lists all '
'available files')
)
def _process_command_line(self):
""" Parse any command line arguments.
Add any new options before this is called. This also sets up
the logging automatically.
"""
(options, args) = self.opt_parse.parse_args(self.args)
self.options = options
# Setup logging based on command line options.
level = self._log_levels[options.loglevel]
#save the path where we want to dump output
self.path = abspath(options.output_dir)
mkdir(self.path)
if level is not None:
self._setup_logging(options.logfile, level,
options.print_log)
def _setup_logging(self, filename=None, loglevel=logging.WARNING,
stream=True):
""" Setup logging for the application.
Parameters
----------
filename : The filename to log messages to. If this is None
a filename is automatically chosen and if it is an
empty string, no file is used
loglevel : The logging level
stream : Boolean indicating if logging is also printed on
stderr
"""
# logging setup
self.logger = logger = logging.getLogger()
logger.setLevel(loglevel)
# Setup the log file.
if filename is None:
filename = splitext(basename(sys.argv[0]))[0] + '.log'
if len(filename) > 0:
lfn = os.path.join(self.path,filename)
if self.num_procs > 1:
logging.basicConfig(level=loglevel, filename=lfn,
filemode='w')
if stream:
logger.addHandler(logging.StreamHandler())
def _create_particles(self, variable_h, callable, min_cell_size=-1,
*args, **kw):
""" Create particles given a callable and any arguments to it.
This will also automatically distribute the particles among
processors if this is a parallel run. Returns the `Particles`
instance that is created.
"""
num_procs = self.num_procs
rank = self.rank
data = None
if rank == 0:
# Only master creates the particles.
pa = callable(*args, **kw)
distr_func = self._distr_func
if num_procs > 1:
# Use the offline load-balancer to distribute the data
# initially. Negative cell size forces automatic computation.
data = LoadBalancer.distribute_particles(pa,
num_procs=num_procs,
block_size=-1,
distr_func=distr_func)
if num_procs > 1:
# Now scatter the distributed data.
pa = self.comm.scatter(data, root=0)
self.particle_array = pa
in_parallel = num_procs > 1
if isinstance(pa, (ParticleArray,)):
pa = [pa]
no_load_balance = self.options.no_load_balance
if no_load_balance:
self.load_balance = False
else:
self.load_balance = True
if self.options.with_cl:
cl_locator_type = kw.get('cl_locator_type', None)
domain_manager_type = kw.get('domain_manager_type', None)
if cl_locator_type and domain_manager_type:
self.particles = CLParticles(
arrays=pa, cl_locator_type=cl_locator_type,
domain_manager_type=domain_manager_type)
else:
self.particles = CLParticles(arrays=pa)
else:
locator_type = kw.get('locator_type', None)
if locator_type:
if locator_type not in [LocatorType.NSquareNeighborLocator,
LocatorType.SPHNeighborLocator]:
msg = "locator type %d not understood"%(locator_type)
raise RuntimeError(msg)
else:
locator_type = LocatorType.SPHNeighborLocator
self.particles = Particles(arrays=pa, variable_h=variable_h,
in_parallel=in_parallel,
load_balancing=self.load_balance,
update_particles=True,
min_cell_size=min_cell_size,
locator_type=locator_type)
return self.particles
######################################################################
# Public interface.
######################################################################
def set_args(self, args):
self.args = args
def add_option(self, opt):
""" Add an Option/OptionGroup or their list to OptionParser """
if isinstance(opt, OptionGroup):
self.opt_parse.add_option_group(opt)
elif isinstance(opt, Option):
self.opt_parse.add_option(opt)
else:
# assume a list of Option/OptionGroup
for o in opt:
self.add_option(o)
def setup(self, solver, create_particles=None,
variable_h=False, min_cell_size=-1, **kwargs):
"""Set the application's solver. This will call the solver's
`setup` method.
The following solver options are set:
dt -- the time step for the solver
tf -- the final time for the simulationl
fname -- the file name for output file printing
freq -- the output print frequency
level -- the output detail level
dir -- the output directory
hks -- Hernquist and Katz kernel correction
eps -- the xsph correction factor
with_cl -- OpenCL related initializations
integration_type -- The integration method
default_kernel -- the default kernel to use for operations
Parameters
----------
create_particles : callable or None
If supplied, particles will be created for the solver using the
particle arrays returned by the callable. Else particles for the
solver need to be set before calling this method
variable_h : bool
If the particles created using create_particles have variable h
min_cell_size : float
minimum cell size for particles created using min_cell_size
"""
self._solver = solver
solver_opts = solver.get_options(self.opt_parse)
if solver_opts is not None:
self.add_option(solver_opts)
self._process_command_line()
options = self.options
if self.num_procs > 1:
if options.parallel_mode == 'simple':
self.set_parallel_manager(SimpleParallelManager())
if options.parallel_mode == "block":
self.set_parallel_manager( SimpleBlockManager() )
if create_particles:
self._create_particles(variable_h, create_particles, min_cell_size,
**kwargs)
pm = self._parallel_manager
if pm is not None:
self.particles.parallel_manager = pm
pm.initialize(self.particles)
self._solver.setup_solver(options.__dict__)
dt = options.time_step
if dt is not None:
solver.set_time_step(dt)
tf = options.final_time
if tf is not None:
solver.set_final_time(tf)
#setup the solver output file name
fname = options.output
if HAS_MPI:
comm = self.comm
rank = self.rank
if not self.num_procs == 0:
fname += '_' + str(rank)
# set the rank for the solver
solver.rank = self.rank
solver.pid = self.rank
solver.comm = self.comm
# set the in parallel flag for the solver
if self.num_procs > 1:
solver.in_parallel = True
# output file name
solver.set_output_fname(fname)
# output print frequency
solver.set_print_freq(options.freq)
# output printing level (default is not detailed)
solver.set_output_printing_level(options.detailed_output)
# output directory
solver.set_output_directory(abspath(options.output_dir))
# set parallel output mode
solver.set_parallel_output_mode(options.parallel_output_mode)
# default kernel
if options.kernel is not None:
solver.default_kernel = getattr(kernels,
kernels.kernel_names[options.kernel])(dim=solver.dim)
# Hernquist and Katz kernel correction
# TODO. Fix the Kernel and Gradient Correction
#solver.set_kernel_correction(options.kernel_correction)
# OpenCL setup for the solver
solver.set_cl(options.with_cl)
if options.resume is not None:
solver.particles = self.particles # needed to be able to load particles
r = solver.load_output(options.resume)
if r is not None:
print 'available files for resume:'
print r
sys.exit(0)
if options.integration is not None:
solver.integrator_type =integration_methods[options.integration][1]
# setup the solver
solver.setup(self.particles)
# print options for the solver
#solver.set_arrays_to_print(options.arrays_to_print)
# add solver interfaces
self.command_manager = CommandManager(solver, self.comm)
solver.set_command_handler(self.command_manager.execute_commands)
if self.rank == 0:
# commandline interface
if options.cmd_line:
from pysph.solver.solver_interfaces import CommandlineInterface
self.command_manager.add_interface(CommandlineInterface().start)
# XML-RPC interface
if options.xml_rpc:
from pysph.solver.solver_interfaces import XMLRPCInterface
addr = options.xml_rpc
idx = addr.find(':')
host = "0.0.0.0" if idx == -1 else addr[:idx]
port = int(addr[idx+1:])
self.command_manager.add_interface(XMLRPCInterface((host,port)).start)
# python MultiProcessing interface
if options.multiproc:
from pysph.solver.solver_interfaces import MultiprocessingInterface
addr = options.multiproc
idx = addr.find('@')
authkey = "pysph" if idx == -1 else addr[:idx]
addr = addr[idx+1:]
idx = addr.find(':')
host = "0.0.0.0" if idx == -1 else addr[:idx]
port = addr[idx+1:]
if port[-1] == '+':
try_next_port = True
port = port[:-1]
else:
try_next_port = False
port = int(port)
interface = MultiprocessingInterface((host,port), authkey,
try_next_port)
self.command_manager.add_interface(interface.start)
self.logger.info('started multiprocessing interface on %s'%(
interface.address,))
def run(self):
"""Run the application."""
self._solver.solve(not self.options.quiet)
def set_parallel_manager(self, mgr):
"""Set the parallel manager class to use."""
self._parallel_manager = mgr
if isinstance(mgr, SimpleParallelManager):
self._distr_func = 'auto'
| [
[
1,
0,
0.0034,
0.0017,
0,
0.66,
0,
715,
0,
2,
0,
0,
715,
0,
0
],
[
1,
0,
0.0051,
0.0017,
0,
0.66,
0.0769,
323,
0,
3,
0,
0,
323,
0,
0
],
[
1,
0,
0.0068,
0.0017,
0,
... | [
"import logging, os",
"from optparse import OptionParser, OptionGroup, Option",
"from os.path import basename, splitext, abspath",
"import sys",
"from utils import mkdir",
"from pysph.base.particles import Particles, CLParticles, ParticleArray",
"from pysph.solver.controller import CommandManager",
"f... |
""" A simple shock tube solver """
from optparse import OptionGroup, Option
import numpy
import pysph.base.api as base
import pysph.sph.api as sph
from solver import Solver
from sph_equation import SPHOperation, SPHIntegration
from integrator import GSPHIntegrator
Fluids = base.Fluid
Solids = base.Solid
Boundary = base.Boundary
def standard_shock_tube_data(name="", type=0, cl_precision="double",
nl=320, nr=80, smoothing_length=None, **kwargs):
""" Standard 400 particles shock tube problem """
dxl = 0.6/nl
dxr = dxl*4
x = numpy.ones(nl+nr)
x[:nl] = numpy.arange(-0.6, -dxl+1e-10, dxl)
x[nl:] = numpy.arange(dxr, 0.6+1e-10, dxr)
m = numpy.ones_like(x)*dxl
h = numpy.ones_like(x)*2*dxr
if smoothing_length:
h = numpy.ones_like(x) * smoothing_length
rho = numpy.ones_like(x)
rho[nl:] = 0.25
u = numpy.zeros_like(x)
e = numpy.ones_like(x)
e[:nl] = 2.5
e[nl:] = 1.795
p = 0.4*rho*e
cs = numpy.sqrt(1.4*p/rho)
idx = numpy.arange(nl+nr)
return base.get_particle_array(name=name,x=x,m=m,h=h,rho=rho,p=p,e=e,
cs=cs,type=type, idx=idx,
cl_precision=cl_precision)
############################################################################
# `ShockTubeSolver` class
############################################################################
class ShockTubeSolver(Solver):
    """Classical SPH solver for the 1D shock tube problem.

    The operation pipeline is: summation density, ideal gas EOS,
    momentum equation with Monaghan artificial viscosity, XSPH
    correction (disabled by default via eps=0), energy equation and
    position stepping.
    """

    def __init__(self, dim, integrator_type, alpha=1.0, beta=1.0,
                 gamma=1.4, xsph_eps=0):
        self.dim = dim

        # defaults used by setup_solver when no parsed options are given
        self.defaults = dict(alpha=alpha,
                             beta=beta,
                             gamma=gamma,
                             xsph_eps=xsph_eps)

        # base class constructor
        Solver.__init__(self, dim, integrator_type)

    def get_options(self, opt_parser):
        """Return the OptionGroup of command line options for this solver."""
        opt = OptionGroup(opt_parser, "ShockTubeSolver options")

        opt.add_option("--alpha", action="store", type="float",
                       dest="alpha", default=self.defaults["alpha"],
                       help="Set the artificial viscosity parameter alpha")

        # BUGFIX: the default previously read self.defaults["alpha"]
        opt.add_option("--beta", action="store", type="float",
                       dest="beta", default=self.defaults["beta"],
                       help="Set the artificial viscosity parameter beta")

        opt.add_option("--gamma", action="store", type="float",
                       dest="gamma", default=self.defaults["gamma"],
                       help="Set the ratio of specific heats gamma")

        opt.add_option("--xsph-eps", action="store", type="float",
                       dest="xsph_eps", default=self.defaults.get("xsph_eps"),
                       help="Constant for XSPH")

        return opt

    def setup_solver(self, options=None):
        """Add the SPH operations making up the shock tube simulation."""
        options = options or self.defaults

        gamma = options.get("gamma")
        alpha = options.get("alpha")
        beta = options.get("beta")
        hks = options.get("hks")
        xsph_eps = options.get("xsph_eps")

        vel_updates = ["u", "v", "w"][:self.dim]
        pos_updates = ["x", "y", "z"][:self.dim]

        ###################################################################
        # Add the operations
        ###################################################################

        # Summation density
        self.add_operation(SPHOperation(
            sph.SPHRho.withargs(hks=hks),
            on_types=[Fluids], from_types=[Fluids, base.Boundary],
            updates=['rho'], id='density')
        )

        # Equation of state
        self.add_operation(SPHOperation(
            sph.IdealGasEquation.withargs(gamma=gamma),
            on_types=[Fluids],
            updates=['p', 'cs'],
            id='eos')
        )

        # Momentum equation
        self.add_operation(SPHIntegration(
            sph.MomentumEquation.withargs(alpha=alpha, beta=beta, hks=hks),
            on_types=[Fluids], from_types=[Fluids, base.Boundary],
            updates=vel_updates,
            id='mom')
        )

        # XSPH correction : defaults to eps = 0
        self.add_operation(SPHIntegration(
            sph.XSPHCorrection.withargs(eps=xsph_eps),
            on_types=[base.Fluid,], from_types=[base.Boundary, base.Fluid],
            updates=vel_updates,
            id="xsph")
        )

        # Energy Equation
        self.add_operation(SPHIntegration(
            sph.EnergyEquation.withargs(hks=hks),
            from_types=[Fluids, base.Boundary],
            on_types=[Fluids], updates=['e'],
            id='enr')
        )

        # Position Step
        self.add_operation(SPHIntegration(
            sph.PositionStepping.withargs(),
            on_types=[Fluids,],
            updates=pos_updates,
            id="step")
        )
############################################################################
# `ADKEShockTubeSolver` class
############################################################################
class ADKEShockTubeSolver(Solver):
    """Shock tube solver with the ADKE (adaptive density kernel
    estimation) procedure for smoothing length adaptation.

    In addition to the classical SPH pipeline this adds a pilot density
    estimate, the adaptive smoothing length update, a velocity divergence
    computation, a conduction coefficient update and an artificial heat
    term.
    """

    def __init__(self, dim, integrator_type, h0, eps, k, g1, g2, alpha, beta,
                 gamma=1.4, xsph_eps=0,
                 kernel=base.CubicSplineKernel, hks=True):
        # solver dimension
        self.dim = dim

        # Hernquist and Katz normalization
        self.hks = hks

        # the SPH kernel to use
        self.kernel = kernel(dim)

        self.defaults = dict(alpha=alpha,
                             beta=beta,
                             gamma=gamma,
                             adke_eps=eps,
                             adke_k=k,
                             adke_h0=h0,
                             g1=g1,
                             g2=g2,
                             xsph_eps=xsph_eps)

        # base class constructor
        Solver.__init__(self, dim, integrator_type)

    def get_options(self, opt_parser):
        """Return the OptionGroup of command line options for this solver."""
        opt = OptionGroup(opt_parser, "ADKEShockTubeSolver options")

        opt.add_option("--alpha", action="store", type="float",
                       dest="alpha", default=self.defaults["alpha"],
                       help="Set the artificial viscosity parameter alpha")

        # BUGFIX: the default previously read self.defaults["alpha"]
        opt.add_option("--beta", action="store", type="float",
                       dest="beta", default=self.defaults["beta"],
                       help="Set the artificial viscosity parameter beta")

        opt.add_option("--gamma", action="store", type="float",
                       dest="gamma", default=self.defaults["gamma"],
                       help="Set the ratio of specific heats gamma")

        opt.add_option("--adke-eps", action="store", type="float",
                       dest="adke_eps", default=self.defaults.get("adke_eps"),
                       help="Sensitivity parameter eps for the ADKE pocedure")

        opt.add_option("--adke-k", action="store", type="float",
                       dest="adke_k", default=self.defaults.get("adke_k"),
                       help="Scaling parameter k for the ADKE pocedure")

        opt.add_option("--adke-h0", action="store", type="float",
                       dest="adke_h0", default=self.defaults.get("adke_h0"),
                       help="Initial smoothing length h0 for the ADKE pocedure")

        opt.add_option("--g1", action="store", type="float",
                       dest="g1", default=self.defaults.get("g1"),
                       help="Artificial heating term coefficient g1")

        opt.add_option("--g2", action="store", type="float",
                       dest="g2", default=self.defaults.get("g2"),
                       help="Artificial heating term coefficient g2")

        opt.add_option("--xsph-eps", action="store", type="float",
                       dest="xsph_eps", default=self.defaults.get("xsph_eps"),
                       help="Constant for XSPH")

        return opt

    def setup_solver(self, options=None):
        """Add the ADKE operation pipeline."""
        options = options or self.defaults

        hks = options.get("hks")
        kernel = self.kernel

        # ADKE parameters
        h0 = options.get("adke_h0")
        eps = options.get("adke_eps")
        k = options.get("adke_k")

        # Artificial heat parameters
        g1 = options.get("g1")
        g2 = options.get("g2")

        # Artificial viscosity parameters
        alpha = options.get("alpha")
        beta = options.get("beta")
        gamma = options.get("gamma")

        xsph_eps = options.get("xsph_eps")

        vel_updates = ["u", "v", "w"][:self.dim]
        pos_updates = ["x", "y", "z"][:self.dim]

        ###################################################################
        # Add the operations
        ###################################################################

        # reset the smoothing length to h0
        self.add_operation(SPHOperation(
            sph.SetSmoothingLength.withargs(h0=h0),
            on_types=[base.Fluid,],
            updates=["h"],
            id="setsmoothing")
        )

        # pilot rho estimate
        self.add_operation(SPHOperation(
            sph.ADKEPilotRho.withargs(h0=h0),
            on_types=[base.Fluid,], from_types=[base.Fluid, base.Boundary],
            updates=['rhop'], id='adke_rho')
        )

        # smoothing length update
        self.add_operation(SPHOperation(
            sph.ADKESmoothingUpdate.withargs(h0=h0, k=k, eps=eps, hks=hks),
            on_types=[base.Fluid,],
            updates=['h'], id='adke')
        )

        # summation density
        self.add_operation(SPHOperation(
            sph.SPHRho.withargs(hks=hks),
            on_types=[base.Fluid,], from_types=[base.Fluid, base.Boundary],
            updates=['rho'], id='density')
        )

        # ideal gas equation
        self.add_operation(SPHOperation(
            sph.IdealGasEquation.withargs(gamma=gamma),
            on_types=[base.Fluid,], updates=['p', 'cs'], id='eos')
        )

        # velocity divergence
        self.add_operation(SPHOperation(
            sph.VelocityDivergence.withargs(hks=hks),
            on_types=[base.Fluid], from_types=[base.Fluid, base.Boundary],
            updates=['div'], id='vdivergence')
        )

        # conduction coefficient update
        self.add_operation(SPHOperation(
            sph.ADKEConductionCoeffUpdate.withargs(g1=g1, g2=g2),
            on_types=[base.Fluid],
            updates=['q'], id='qcoeff')
        )

        # momentum equation
        self.add_operation(SPHIntegration(
            sph.MomentumEquation.withargs(alpha=alpha, beta=beta, hks=hks),
            from_types=[base.Fluid, base.Boundary], on_types=[base.Fluid],
            updates=vel_updates, id='mom')
        )

        # XSPH correction : defaults to eps = 0
        self.add_operation(SPHIntegration(
            sph.XSPHCorrection.withargs(eps=xsph_eps, hks=hks),
            on_types=[base.Fluid,], from_types=[base.Boundary, base.Fluid],
            updates=vel_updates,
            id="xsph")
        )

        # energy equation
        self.add_operation(SPHIntegration(
            sph.EnergyEquation.withargs(hks=hks, alpha=alpha, beta=beta,
                                        gamma=gamma),
            on_types=[base.Fluid], from_types=[base.Fluid, base.Boundary],
            updates=['e'],
            id='enr')
        )

        # artificial heat
        self.add_operation(SPHIntegration(
            sph.ArtificialHeat.withargs(eta=0.1, hks=hks),
            on_types=[base.Fluid], from_types=[base.Fluid, base.Boundary],
            updates=['e'],
            id='aheat')
        )

        # position step
        self.add_operation(SPHIntegration(
            sph.PositionStepping.withargs(),
            on_types=[Fluids,],
            updates=pos_updates,
            id="step")
        )
############################################################################
# `MonaghanShockTubeSolver` class
############################################################################
class MonaghanShockTubeSolver(Solver):
    """Shock tube solver using the ADKE smoothing length update together
    with Monaghan's signal-based artificial viscosity.

    The density can be computed either by summation (default) or by
    integrating the continuity equation (--sd flag).
    """

    def __init__(self, dim, integrator_type, h0, eps, k,
                 beta=1.0, K=1.0, f=0.5, gamma=1.4,
                 xsph_eps=0.0, summation_density=True,
                 kernel=base.CubicSplineKernel, hks=True):
        # set the solver dimension
        self.dim = dim

        # Hernquist and Katz normalization
        self.hks = hks

        # the SPH kernel to use
        self.kernel = kernel(dim)

        # set the defaults
        self.defaults = dict(gamma=gamma,
                             adke_eps=eps, adke_k=k, adke_h0=h0,
                             beta=beta, K=K, f=f,
                             xsph_eps=xsph_eps,
                             summation_density=summation_density)

        # base class constructor
        Solver.__init__(self, dim, integrator_type)

    def get_options(self, opt_parser):
        """Return the OptionGroup of command line options for this solver."""
        opt = OptionGroup(opt_parser, "MonaghanShockTubeSolver options")

        opt.add_option("--sd", action="store_true",
                       dest="summation_density",
                       default=self.defaults["summation_density"],
                       help="Use summation density for the density equation")

        opt.add_option("--gamma", action="store", type="float",
                       dest="gamma", default=self.defaults["gamma"],
                       help="Set the ratio of specific heats gamma")

        opt.add_option("--adke-eps", action="store", type="float",
                       dest="adke_eps", default=self.defaults.get("adke_eps"),
                       help="Sensitivity parameter eps for the ADKE pocedure")

        opt.add_option("--adke-k", action="store", type="float",
                       dest="adke_k", default=self.defaults.get("adke_k"),
                       help="Scaling parameter k for the ADKE pocedure")

        opt.add_option("--adke-h0", action="store", type="float",
                       dest="adke_h0", default=self.defaults.get("adke_h0"),
                       help="Initial smoothing length h0 for the ADKE pocedure")

        opt.add_option("--beta", action="store", type="float",
                       dest="beta", default=self.defaults["beta"],
                       help="Constant 'beta' for the signal viscosity")

        # BUGFIX: the default previously read self.defaults.get("beta")
        opt.add_option("--f", action="store", type="float",
                       dest="f", default=self.defaults.get("f"),
                       help="Constant 'f' for the signal viscosity")

        opt.add_option("--K", action="store", type="float",
                       dest="K", default=self.defaults.get("K"),
                       help="Constant 'K' for the signal viscosity")

        opt.add_option("--xsph-eps", action="store", type="float",
                       dest="xsph_eps", default=self.defaults.get("xsph_eps"),
                       help="Constant for XSPH")

        return opt

    def setup_solver(self, options=None):
        """Add the signal-viscosity operation pipeline."""
        options = options or self.defaults

        hks = options.get("hks")

        # ADKE parameters
        h0 = options.get("adke_h0")
        eps = options.get("adke_eps")
        k = options.get("adke_k")

        # Artificial viscosity parameters
        beta = options.get("beta")
        K = options.get("K")
        f = options.get("f")
        gamma = options.get("gamma")

        # XSPH eps
        xsph_eps = options.get("xsph_eps")

        # summation density
        sd = options.get("summation_density")

        vel_updates = ["u", "v", "w"][:self.dim]
        pos_updates = ["x", "y", "z"][:self.dim]

        ###################################################################
        # Add the operations
        ###################################################################

        # reset the smoothing length to h0
        self.add_operation(SPHOperation(
            sph.SetSmoothingLength.withargs(h0=h0),
            on_types=[base.Fluid,],
            updates=["h"],
            id="setsmoothing")
        )

        # pilot rho estimate
        self.add_operation(SPHOperation(
            sph.ADKEPilotRho.withargs(h0=h0),
            on_types=[Fluids,], from_types=[Fluids, Boundary],
            updates=['rhop'], id='adke_rho')
        )

        # smoothing length update
        self.add_operation(SPHOperation(
            sph.ADKESmoothingUpdate.withargs(h0=h0, k=k, eps=eps, hks=hks),
            on_types=[Fluids,],
            updates=['h'], id='adke')
        )

        # summation density if requested
        if sd:
            self.add_operation(SPHOperation(
                sph.SPHRho.withargs(hks=hks),
                on_types=[base.Fluid,], from_types=[base.Fluid, base.Boundary],
                updates=["rho"],
                id="summation_density")
            )

        # ideal gas eos
        self.add_operation(SPHOperation(
            sph.IdealGasEquation.withargs(gamma=gamma),
            on_types=[base.Fluid],
            updates=['p', 'cs'],
            id='eos')
        )

        # density rate if not summation density
        if not sd:
            self.add_operation(SPHIntegration(
                sph.SPHDensityRate.withargs(hks=hks),
                on_types=[base.Fluid,], from_types=[base.Boundary, base.Fluid],
                updates=["rho"],
                id="densityrate")
            )

        # momentum equation pressure gradient
        self.add_operation(SPHIntegration(
            sph.SPHPressureGradient.withargs(hks=hks),
            on_types=[base.Fluid,], from_types=[base.Boundary, base.Fluid],
            updates=vel_updates,
            id="pgrad")
        )

        # momentum equation artificial viscosity
        self.add_operation(SPHIntegration(
            sph.MomentumEquationSignalBasedViscosity.withargs(beta=beta,
                                                              K=K,
                                                              hks=hks),
            on_types=[base.Fluid,], from_types=[base.Boundary, base.Fluid],
            updates=vel_updates,
            id="visc")
        )

        # XSPH correction : defaults to eps = 0
        self.add_operation(SPHIntegration(
            sph.XSPHCorrection.withargs(eps=xsph_eps, hks=hks),
            on_types=[base.Fluid,], from_types=[base.Boundary, base.Fluid],
            updates=vel_updates,
            id="xsph")
        )

        # energy equation
        self.add_operation(SPHIntegration(
            sph.EnergyEquationWithSignalBasedViscosity.withargs(beta=beta,
                                                                K=K, f=f,
                                                                hks=hks),
            on_types=[base.Fluid,], from_types=[base.Boundary, base.Fluid],
            updates=["e"],
            id="energy")
        )

        # position step
        self.add_operation(SPHIntegration(
            sph.PositionStepping.withargs(),
            on_types=[Fluids,],
            updates=pos_updates,
            id="step")
        )
############################################################################
# `ShockTubeSolver` class
############################################################################
class GSPHShockTubeSolver(Solver):
    """Godunov-SPH (GSPH) solver for the shock tube problem.

    Always uses the dedicated GSPHIntegrator and a Gaussian kernel.
    """

    def __init__(self, dim, integrator_type=None):
        self.dim = dim

        # GSPH requires its dedicated integrator; any `integrator_type`
        # argument is deliberately ignored, as in the original code.
        Solver.__init__(self, dim, GSPHIntegrator)

        self.default_kernel = base.GaussianKernel(dim)

    def get_options(self, opt_parser):
        # This solver exposes no extra command line options.
        pass

    def setup_solver(self, options=None):
        """Register the GSPH operation pipeline with the solver."""
        hks = False

        velocities = ["u", "v", "w"][:self.dim]
        positions = ["x", "y", "z"][:self.dim]

        pipeline = [
            # Summation density
            SPHOperation(
                sph.SPHRho.withargs(hks=hks),
                on_types=[Fluids], from_types=[Fluids, base.Boundary],
                updates=['rho'], id='density'),

            # Equation of state
            SPHOperation(
                sph.IdealGasEquation.withargs(gamma=1.4),
                on_types=[Fluids],
                updates=['p', 'cs'], id='eos'),

            # Momentum equation
            SPHIntegration(
                sph.GSPHMomentumEquation.withargs(gamma=1.4),
                on_types=[Fluids], from_types=[Fluids, base.Boundary],
                updates=velocities, id='mom'),

            # Energy Equation
            SPHIntegration(
                sph.GSPHEnergyEquation.withargs(hks=hks),
                on_types=[Fluids], from_types=[Fluids, base.Boundary],
                updates=['e'], id='enr'),

            # Position Step
            SPHIntegration(
                sph.PositionStepping.withargs(),
                on_types=[Fluids,],
                updates=positions, id='step'),
        ]

        for operation in pipeline:
            self.add_operation(operation)
| [
[
8,
0,
0.0015,
0.0015,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.0044,
0.0015,
0,
0.66,
0.0667,
323,
0,
2,
0,
0,
323,
0,
0
],
[
1,
0,
0.0059,
0.0015,
0,
0.66... | [
"\"\"\" A simple shock tube solver \"\"\"",
"from optparse import OptionGroup, Option",
"import numpy",
"import pysph.base.api as base",
"import pysph.sph.api as sph",
"from solver import Solver",
"from sph_equation import SPHOperation, SPHIntegration",
"from integrator import GSPHIntegrator",
"Flui... |
"""
Classes for generators of some simple elements.
"""
# standard imports
import logging
logger = logging.getLogger()
import numpy
# local imports
from pysph.base.carray import DoubleArray, LongArray
from pysph.base.nnps import *
from pysph.base.point import Point
from pysph.solver.particle_generator import *
from pysph.solver.particle_generator import MassComputationMode as MCM
from pysph.solver.particle_generator import DensityComputationMode as DCM
###############################################################################
# `compute_particle_mass` function.
###############################################################################
def compute_particle_mass(parray, kernel, density=1000.0, h=0.1, dim=3):
    """
    Given a particle array, kernel, target density and interaction radius, find
    the mass of each particle.

    The mass is m = density / sum_j W(centroid, x_j, h), where the kernel sum
    runs over the neighbors of the centroid of the particle cloud.

    Note that this method works only when the particle radius is constant. This
    may also compute incorrect values when the particle cofiguration has voids
    within.
    """
    centroid = Point(0, 0, 0)
    dist = DoubleArray(0)
    indices = LongArray(0)

    x = parray.get('x')
    centroid.x = numpy.sum(x)/float(len(x))
    y = None
    z = None

    logger.debug('particles to compute_particle_mass %d'%(len(x)))

    # fill in y/z (or zero arrays) depending on the dimensionality
    if dim > 1:
        y = parray.get('y')
        centroid.y = numpy.sum(y)/float(len(y))
        if dim > 2:
            z = parray.get('z')
            centroid.z = numpy.sum(z)/float(len(z))
        else:
            # BUGFIX: dtype=numpy.float was removed in NumPy 1.20; the
            # builtin float produces the identical float64 dtype.
            z = numpy.zeros(len(x), dtype=float)
    else:
        y = numpy.zeros(len(x), dtype=float)
        z = y

    logger.debug('Centroid : %s'%(centroid))
    radius = kernel.radius()

    # find the nearest points in parray of the centroid.
    brute_force_nnps(pnt=centroid, search_radius=h*radius,
                     xa=x, ya=y, za=z,
                     neighbor_indices=indices,
                     neighbor_distances=dist)

    # accumulate the kernel sum over the centroid's neighbors
    k = 0.0
    logger.info('Number of neighbors : %d'%(indices.length))
    pnt = Point()
    for i in range(indices.length):
        pnt.x = x[indices[i]]
        pnt.y = y[indices[i]]
        pnt.z = z[indices[i]]
        k += kernel.py_function(centroid, pnt, h)

    logger.info('Kernel sum : %f'%(k))
    logger.info('Requested density : %f'%(density))
    m = float(density/k)
    logger.info('Computed mass : %f'%(m))
    return m
###############################################################################
# `find_best_particle_spacing' function.
###############################################################################
def find_best_particle_spacing(length=1.0,
                               initial_spacing=0.1,
                               end_points_exact=True,
                               tolerance=1e-09):
    """
    Given the length and initial_spacing return a (possibly) corrected
    particle spacing and the number of points.

    Returns (spacing, n_points).  A length at or below `tolerance` yields
    zero points.  With end_points_exact the spacing is stretched so the
    last particle lands exactly on the end point.
    """
    if length <= tolerance:
        return initial_spacing, 0

    n_intervals = int(numpy.floor(length/initial_spacing))

    if end_points_exact:
        # BUGFIX: a span shorter than one spacing (n_intervals == 0)
        # previously raised ZeroDivisionError; place particles on both
        # end points with the span itself as the spacing.
        if n_intervals == 0:
            return length, 2
        r = length - n_intervals*initial_spacing
        new_spacing = initial_spacing + float(r/n_intervals)
    else:
        new_spacing = initial_spacing
        r = length - n_intervals*initial_spacing
        # add an interval if that lands the last point closer to the end
        if r > numpy.fabs(length - ((n_intervals+1)*initial_spacing)):
            n_intervals += 1

    return new_spacing, (n_intervals+1)
###############################################################################
# `LineGenerator` class.
###############################################################################
class LineGenerator(ParticleGenerator):
    """
    Generate a line of points.
    """
    def __init__(self,
                 output_particle_arrays=None,
                 particle_mass=-1.0,
                 mass_computation_mode=MCM.Compute_From_Density,
                 particle_density=1000.0,
                 density_computation_mode=DCM.Set_Constant,
                 particle_h=0.1,
                 kernel=None,
                 start_point=Point(0, 0, 0),
                 end_point=Point(0, 0, 1),
                 particle_spacing=0.05,
                 end_points_exact=True,
                 tolerance=1e-09,
                 *args, **kwargs):
        """
        Constructor.
        """
        # avoid a shared mutable default argument
        if output_particle_arrays is None:
            output_particle_arrays = []

        # BUGFIX: the base class constructor was never called, so the
        # attributes read later by generate_func (particle_h, kernel,
        # the computation modes, output_particle_arrays) were unset.
        # Mirrors RectangleGenerator.__init__.
        ParticleGenerator.__init__(
            self,
            output_particle_arrays=output_particle_arrays,
            particle_mass=particle_mass,
            mass_computation_mode=mass_computation_mode,
            particle_density=particle_density,
            density_computation_mode=density_computation_mode,
            particle_h=particle_h,
            kernel=kernel)

        self.start_point = Point(start_point.x,
                                 start_point.y,
                                 start_point.z)
        self.end_point = Point(end_point.x,
                               end_point.y,
                               end_point.z)
        self.particle_spacing = particle_spacing
        self.end_points_exact = end_points_exact
        self.tolerance = tolerance

    def get_coords(self):
        """
        Returns 3 numpy arrays representing the coordinates of the generated
        points.
        """
        dir = self.end_point - self.start_point
        distance = dir.length()

        # degenerate line: start and end coincide (within tolerance)
        if distance <= self.tolerance:
            x = numpy.asarray([], dtype=float)
            y = numpy.asarray([], dtype=float)
            z = numpy.asarray([], dtype=float)
            return x, y, z

        normal = dir/distance
        new_spacing, np = find_best_particle_spacing(
            length=distance,
            initial_spacing=self.particle_spacing,
            end_points_exact=self.end_points_exact,
            tolerance=self.tolerance)

        x = numpy.zeros(np, dtype=float)
        y = numpy.zeros(np, dtype=float)
        z = numpy.zeros(np, dtype=float)

        # march along the unit direction vector
        for i in range(np):
            x[i] = self.start_point.x + i*normal.x*new_spacing
            y[i] = self.start_point.y + i*normal.y*new_spacing
            z[i] = self.start_point.z + i*normal.z*new_spacing

        return x, y, z

    def validate_setup(self):
        """
        Delegate validation to the base class.
        """
        return ParticleGenerator.validate_setup(self)

    def generate_func(self):
        """
        Generate a complete particle array with the required properties
        computed.
        """
        # setup the output particle array as required.
        self._setup_outputs()

        # find the coordinates
        x, y, z = self.get_coords()

        # add the generated particles to the output particle array
        output = self.output_particle_arrays[0]
        output.add_particles(x=x, y=y, z=z)

        # check if 'h' has to be set.
        if self.particle_h > 0.:
            output.h[:] = self.particle_h

        # check if density has to be set.
        if self.density_computation_mode == DCM.Set_Constant:
            output.rho[:] = self.particle_density

        # check if mass has to be set.
        if self.mass_computation_mode == MCM.Set_Constant:
            output.m[:] = self.particle_mass
        elif self.mass_computation_mode == MCM.Compute_From_Density:
            m = compute_particle_mass(density=self.particle_density,
                                      h=self.particle_h,
                                      parray=output,
                                      kernel=self.kernel,
                                      dim=3)
            output.m[:] = m

    def num_output_arrays(self):
        """
        Return the number of output particles arrays this generator will be
        generating.
        """
        return 1
###############################################################################
# `RectangleGenerator` class.
###############################################################################
class RectangleGenerator(ParticleGenerator):
    """
    Class to generate rectangles of particles - filled and hollow.

    The rectangle must be axis aligned: exactly two of the three
    coordinates of start_point and end_point must differ.
    """
    def __init__(self,
                 input_particle_arrays=None,
                 particle_mass=-1.0,
                 mass_computation_mode=MCM.Compute_From_Density,
                 particle_density=1000.0,
                 density_computation_mode=DCM.Set_Constant,
                 particle_h=0.1,
                 kernel=None,
                 filled=True,
                 start_point=Point(0, 0, 0),
                 end_point=Point(1, 1, 0),
                 particle_spacing_x1=0.1,
                 particle_spacing_x2=0.1,
                 end_points_exact=True,
                 tolerance=1e-09,
                 *args, **kwargs):
        """
        Constructor.
        """
        # avoid a shared mutable default argument
        if input_particle_arrays is None:
            input_particle_arrays = []

        ParticleGenerator.__init__(self,
                                   input_particle_arrays=input_particle_arrays,
                                   particle_mass=particle_mass,
                                   mass_computation_mode=mass_computation_mode,
                                   particle_density=particle_density,
                                   density_computation_mode=density_computation_mode,
                                   particle_h=particle_h,
                                   kernel=kernel)

        self.filled = filled
        self.start_point = Point(start_point.x, start_point.y, start_point.z)
        self.end_point = Point(end_point.x, end_point.y, end_point.z)
        self.particle_spacing_x1 = particle_spacing_x1
        self.particle_spacing_x2 = particle_spacing_x2
        self.end_points_exact = end_points_exact
        self.tolerance = tolerance

    def num_output_arrays(self):
        """
        Return the number of output particle arrays generated (always one).
        """
        return 1

    def validate_setup(self):
        """
        Make sure the input is valid.
        """
        if ParticleGenerator.validate_setup(self) == False:
            return False
        return self._validate_input_points()

    def _validate_input_points(self):
        """
        Make sure a proper rectangle has been requested by the input points.
        """
        dir = [0, 0, 0]
        if self.start_point.x != self.end_point.x:
            dir[0] = 1
        if self.start_point.y != self.end_point.y:
            dir[1] = 1
        if self.start_point.z != self.end_point.z:
            dir[2] = 1

        # an axis-aligned rectangle needs exactly two differing coordinates
        if sum(dir) != 2:
            msg = 'Incorrect input points specified'
            msg += '\n'
            msg += str(self.start_point)+' , '+str(self.end_point)
            logger.error(msg)
            return False
        return True

    def get_coords(self):
        """
        Return x, y, z arrays of generated points, or None on bad input.
        """
        # based on the input points, decide which is the plane this rectangle is
        # going to lie on.
        if self._validate_input_points() is False:
            return None

        dir = [0, 0, 0]
        if self.start_point.x != self.end_point.x:
            dir[0] = 1
        if self.start_point.y != self.end_point.y:
            dir[1] = 1
        if self.start_point.z != self.end_point.z:
            dir[2] = 1

        if dir[0] == 1:
            if dir[1] == 1:
                x, y, z = self._generate_x_y_rectangle()
            else:
                x, y, z = self._generate_x_z_rectangle()
        else:
            x, y, z = self._generate_y_z_rectangle()

        return x, y, z

    def _generate_x_y_rectangle(self):
        """
        Generate a rectangle in the x-y plane.
        """
        # order the corners so generation runs in the positive direction
        if self.start_point.x < self.end_point.x:
            start_x1 = self.start_point.x
            end_x1 = self.end_point.x
        else:
            start_x1 = self.end_point.x
            end_x1 = self.start_point.x

        if self.start_point.y < self.end_point.y:
            start_x2 = self.start_point.y
            end_x2 = self.end_point.y
        else:
            start_x2 = self.end_point.y
            end_x2 = self.start_point.y

        spacing1 = self.particle_spacing_x1
        spacing2 = self.particle_spacing_x2

        x, y = self.generate_rectangle_coords(start_x1=start_x1,
                                              start_x2=start_x2,
                                              end_x1=end_x1,
                                              end_x2=end_x2,
                                              spacing1=spacing1,
                                              spacing2=spacing2)
        z = numpy.zeros(len(x))
        return x, y, z

    def _generate_x_z_rectangle(self):
        """
        Generate a rectangle in the x-z plane.
        """
        if self.start_point.x < self.end_point.x:
            start_x1 = self.start_point.x
            end_x1 = self.end_point.x
        else:
            start_x1 = self.end_point.x
            end_x1 = self.start_point.x

        if self.start_point.z < self.end_point.z:
            start_x2 = self.start_point.z
            end_x2 = self.end_point.z
        else:
            start_x2 = self.end_point.z
            end_x2 = self.start_point.z

        spacing1 = self.particle_spacing_x1
        spacing2 = self.particle_spacing_x2

        x, z = self.generate_rectangle_coords(start_x1=start_x1,
                                              start_x2=start_x2,
                                              end_x1=end_x1,
                                              end_x2=end_x2,
                                              spacing1=spacing1,
                                              spacing2=spacing2)
        y = numpy.zeros(len(x))
        return x, y, z

    def _generate_y_z_rectangle(self):
        """
        Generate a rectangle in the y-z plane.
        """
        if self.start_point.y < self.end_point.y:
            start_x1 = self.start_point.y
            end_x1 = self.end_point.y
        else:
            start_x1 = self.end_point.y
            end_x1 = self.start_point.y

        if self.start_point.z < self.end_point.z:
            start_x2 = self.start_point.z
            end_x2 = self.end_point.z
        else:
            start_x2 = self.end_point.z
            end_x2 = self.start_point.z

        spacing1 = self.particle_spacing_x1
        spacing2 = self.particle_spacing_x2

        y, z = self.generate_rectangle_coords(start_x1=start_x1,
                                              start_x2=start_x2,
                                              end_x1=end_x1,
                                              end_x2=end_x2,
                                              spacing1=spacing1,
                                              spacing2=spacing2)
        x = numpy.zeros(len(y))
        return x, y, z

    def generate_rectangle_coords(self, start_x1, start_x2, end_x1, end_x2,
                                  spacing1, spacing2):
        """
        Generates a rectangle from the given start and end points, with the
        given spacing.

        Raises ValueError for non-positive widths, heights or spacings.
        """
        width = end_x1-start_x1
        height = end_x2-start_x2

        if width <= 0.0 or height <= 0.0 or spacing1 <= 0.0 or spacing2 <= 0:
            msg = 'Incorrect values :\n'
            # BUGFIX: the detail line previously *reassigned* msg (dropping
            # the header), and mislabeled the second spacing as spacing1.
            msg += 'width=%f, height=%f, spacing1=%f, spacing2=%f'%(
                width, height, spacing1, spacing2)
            # call form works under both Python 2 and 3
            raise ValueError(msg)

        new_spacing1, n1 = find_best_particle_spacing(length=width,
                                                      initial_spacing=spacing1,
                                                      end_points_exact=\
                                                          self.end_points_exact,
                                                      tolerance=self.tolerance)
        new_spacing2, n2 = find_best_particle_spacing(length=height,
                                                      initial_spacing=spacing2,
                                                      end_points_exact=\
                                                          self.end_points_exact,
                                                      tolerance=self.tolerance)
        # a hollow rectangle only needs the perimeter points
        if self.filled == False:
            n2 -= 2
            n = 2*n1 + 2*n2
        else:
            n = n1*n2

        x1 = numpy.zeros(n, dtype=float)
        x2 = numpy.zeros(n, dtype=float)

        if self.filled is True:
            pindx = 0
            for i in range(n1):
                for j in range(n2):
                    x1[pindx] = start_x1 + i*new_spacing1
                    x2[pindx] = start_x2 + j*new_spacing2
                    pindx += 1
        else:
            pindx = 0
            # generate the bottom horizontal lines
            for i in range(n1):
                x1[pindx] = start_x1 + i*new_spacing1
                x2[pindx] = start_x2
                pindx += 1
            end_x1 = x1[pindx-1]
            # now generate the left vertical line
            for i in range(n2):
                x1[pindx] = start_x1
                x2[pindx] = start_x2 + (i+1)*new_spacing2
                pindx += 1
            end_x2 = x2[pindx-1] + new_spacing2
            # the top
            for i in range(n1):
                x1[pindx] = start_x1 + i*new_spacing1
                x2[pindx] = end_x2
                pindx += 1
            # the right side
            for i in range(n2):
                x1[pindx] = end_x1
                x2[pindx] = start_x2 + (i+1)*new_spacing2
                pindx += 1

        return x1, x2

    def generate_func(self):
        """
        Generate a complete particle array with the required properties
        computed.
        """
        # setup the output particle array as required.
        self._setup_outputs()

        # find the coordinates
        x, y, z = self.get_coords()

        # add the generated particles to the output particle array
        output = self.output_particle_arrays[0]
        output.add_particles(x=x, y=y, z=z)

        # check if 'h' has to be set.
        if self.particle_h > 0.:
            output.h[:] = self.particle_h

        # check if density has to be set.
        if self.density_computation_mode == DCM.Set_Constant:
            output.rho[:] = self.particle_density

        # check if mass has to be set.
        if self.mass_computation_mode == MCM.Set_Constant:
            output.m[:] = self.particle_mass
        elif self.mass_computation_mode == MCM.Compute_From_Density:
            m = compute_particle_mass(density=self.particle_density,
                                      h=self.particle_h,
                                      parray=output,
                                      kernel=self.kernel,
                                      dim=3)
            output.m[:] = m
###############################################################################
# `CuboidGenerator` class.
###############################################################################
class CuboidGenerator(ParticleGenerator):
    """
    Class to generate cuboids of particles (filled and hollow).
    """
    def __init__(self,
                 output_particle_arrays=None,
                 particle_mass=-1.0,
                 mass_computation_mode=MCM.Compute_From_Density,
                 particle_density=1000.0,
                 density_computation_mode=DCM.Set_Constant,
                 particle_h=0.1,
                 kernel=None,
                 filled=True,
                 exclude_top=False,
                 start_point=Point(0, 0, 0),
                 end_point=Point(1, 1, 1),
                 particle_spacing_x=0.1,
                 particle_spacing_y=0.1,
                 particle_spacing_z=0.1,
                 end_points_exact=True,
                 tolerance=1e-09,
                 *args,
                 **kwargs):
        """
        Constructor.
        """
        # avoid a shared mutable default argument
        if output_particle_arrays is None:
            output_particle_arrays = []

        # BUGFIX: the base class constructor was never called, so the
        # attributes read later by generate_func (particle_h, kernel,
        # the computation modes, output_particle_arrays) were unset.
        # Mirrors RectangleGenerator.__init__.
        ParticleGenerator.__init__(
            self,
            output_particle_arrays=output_particle_arrays,
            particle_mass=particle_mass,
            mass_computation_mode=mass_computation_mode,
            particle_density=particle_density,
            density_computation_mode=density_computation_mode,
            particle_h=particle_h,
            kernel=kernel)

        self.filled = filled
        self.exclude_top = exclude_top

        self.start_point = Point(start_point.x,
                                 start_point.y,
                                 start_point.z)
        self.end_point = Point(end_point.x,
                               end_point.y,
                               end_point.z)

        self.particle_spacing_x = particle_spacing_x
        self.particle_spacing_y = particle_spacing_y
        self.particle_spacing_z = particle_spacing_z

        self.end_points_exact = end_points_exact
        self.tolerance = tolerance

    def num_output_arrays(self):
        """
        Number of particle arrays generated by this generated.
        """
        return 1

    def validate_setup(self):
        """
        Make sure the input is valid.
        """
        if ParticleGenerator.validate_setup(self) == False:
            return False
        return self._validate_input_points()

    def _validate_input_points(self):
        """
        Make sure the end points input are proper.
        """
        length = self.end_point.x - self.start_point.x
        depth = self.end_point.y - self.start_point.y
        width = self.end_point.z - self.start_point.z

        # every extent must cover at least one particle spacing
        if (self.particle_spacing_x < 0.0 or
            self.particle_spacing_y < 0.0 or
            self.particle_spacing_z < 0.0 or
            abs(length)-self.particle_spacing_x < 0. or
            abs(depth)-self.particle_spacing_y < 0. or
            abs(width)-self.particle_spacing_z < 0.):
            msg = 'Incorrect input paramters specified'
            logger.error(msg)
            return False
        return True

    def get_coords(self):
        """
        Returns 3 numpy arrays representing the coordinates of the generated points.
        """
        start_point, end_point, length, depth, width = self._get_end_points()

        # BUGFIX: end_points_exact and tolerance were stored by __init__
        # but never forwarded here; forward them (defaults are identical,
        # so default behavior is unchanged).
        particle_spacing_x, nx = find_best_particle_spacing(
            length=length,
            initial_spacing=self.particle_spacing_x,
            end_points_exact=self.end_points_exact,
            tolerance=self.tolerance)
        particle_spacing_y, ny = find_best_particle_spacing(
            length=depth,
            initial_spacing=self.particle_spacing_y,
            end_points_exact=self.end_points_exact,
            tolerance=self.tolerance)
        particle_spacing_z, nz = find_best_particle_spacing(
            length=width,
            initial_spacing=self.particle_spacing_z,
            end_points_exact=self.end_points_exact,
            tolerance=self.tolerance)

        logger.info('x-spacing : %f, nx : %d'\
                        %(particle_spacing_x, nx))
        logger.info('y-spacing : %f, ny : %d'\
                        %(particle_spacing_y, ny))
        logger.info('z-spacing : %f, nz : %d'\
                        %(particle_spacing_z, nz))

        if self.filled == True:
            return self._generate_filled_cuboid(start_point, end_point,
                                                nx, ny, nz,
                                                particle_spacing_x,
                                                particle_spacing_y,
                                                particle_spacing_z)
        else:
            return self._generate_empty_cuboid(start_point, end_point,
                                               nx, ny, nz,
                                               particle_spacing_x,
                                               particle_spacing_y,
                                               particle_spacing_z)

    def generate_func(self):
        """
        Generate the complete particle array with the required properties computed.
        """
        self._setup_outputs()

        # compute the coords.
        x, y, z = self.get_coords()

        # add the generated particles to the output particle array
        output = self.output_particle_arrays[0]
        output.add_particles(x=x, y=y, z=z)

        # check if 'h' has to be set
        if self.particle_h > 0.:
            output.h[:] = self.particle_h

        # check if the density is to be set.
        if self.density_computation_mode == DCM.Set_Constant:
            output.rho[:] = self.particle_density

        # check if the mass has to be computed.
        if self.mass_computation_mode == MCM.Set_Constant:
            output.m[:] = self.particle_mass
        elif self.mass_computation_mode == MCM.Compute_From_Density:
            m = compute_particle_mass(density=self.particle_density,
                                      h=self.particle_h,
                                      parray=output,
                                      kernel=self.kernel,
                                      dim=3)
            output.m[:] = m

    def _get_end_points(self):
        """
        Return changed end points so that start_point to end_point is moving in
        positive direction for all coords.
        """
        length = self.end_point.x - self.start_point.x
        depth = self.end_point.y - self.start_point.y
        width = self.end_point.z - self.start_point.z

        start_point = Point()
        end_point = Point()

        if length < 0:
            start_point.x = self.end_point.x
            end_point.x = self.start_point.x
        else:
            start_point.x = self.start_point.x
            end_point.x = self.end_point.x

        if depth < 0:
            start_point.y = self.end_point.y
            end_point.y = self.start_point.y
        else:
            start_point.y = self.start_point.y
            end_point.y = self.end_point.y

        if width < 0:
            start_point.z = self.end_point.z
            end_point.z = self.start_point.z
        else:
            start_point.z = self.start_point.z
            end_point.z = self.end_point.z

        return start_point, end_point, abs(length), abs(depth), abs(width)

    def _generate_empty_cuboid(self, start_point, end_point, nx, ny, nz,
                               particle_spacing_x, particle_spacing_y,
                               particle_spacing_z):
        """
        Generate only the six (or five, with exclude_top) boundary planes
        of the cuboid.
        """
        logger.info('Input num pts : %d %d %d'%(nx, ny, nz))

        # count the number of perimeter points; ny/nz are temporarily
        # decremented to avoid double-counting shared edges, then restored
        if self.exclude_top is True:
            ny -= 1
        n = 0
        n += 2*nx*ny  # for the z-max and z-min planes
        nz -= 2
        if self.exclude_top is False:
            n += 2*nx*nz  # for the y-max and y-min planes
            ny -= 2
        else:
            n += nx*nz
            ny -= 1
        n += 2*ny*nz  # for the x-max and x-min planes
        if self.exclude_top is False:
            ny += 2
        else:
            ny += 1
        nz += 2

        x = numpy.zeros(n, dtype=float)
        y = numpy.zeros(n, dtype=float)
        z = numpy.zeros(n, dtype=float)

        pindx = 0
        logger.info('Now using num pts : %d %d %d'%(nx, ny, nz))
        logger.info('Computed number of points : %d'%(n))

        # generate the z-min and max planes
        for i in range(nx):
            for j in range(ny):
                x[pindx] = start_point.x + i*particle_spacing_x
                y[pindx] = start_point.y + j*particle_spacing_y
                z[pindx] = start_point.z
                pindx += 1
        for i in range(nx):
            for j in range(ny):
                x[pindx] = start_point.x + i*particle_spacing_x
                y[pindx] = start_point.y + j*particle_spacing_y
                z[pindx] = end_point.z
                pindx += 1

        # generate the bottom and top planes
        for i in range(nx):
            for k in range(nz-2):
                x[pindx] = start_point.x + i*particle_spacing_x
                y[pindx] = start_point.y
                z[pindx] = start_point.z + (k+1)*particle_spacing_z
                pindx += 1
        if self.exclude_top is False:
            for i in range(nx):
                for k in range(nz-2):
                    x[pindx] = start_point.x + i*particle_spacing_x
                    y[pindx] = end_point.y
                    z[pindx] = start_point.z + (k+1)*particle_spacing_z
                    pindx += 1

        # generate the left and right planes
        if self.exclude_top is True:
            ny += 1
        for j in range(ny-2):
            for k in range(nz-2):
                x[pindx] = start_point.x
                y[pindx] = start_point.y + (j+1)*particle_spacing_y
                z[pindx] = start_point.z + (k+1)*particle_spacing_z
                pindx += 1
        for j in range(ny-2):
            for k in range(nz-2):
                x[pindx] = end_point.x
                y[pindx] = start_point.y + (j+1)*particle_spacing_y
                z[pindx] = start_point.z + (k+1)*particle_spacing_z
                pindx += 1

        logger.info('Last pindx value : %d'%(pindx))
        return x, y, z

    def _generate_filled_cuboid(self, start_point, end_point, nx, ny, nz,
                                particle_spacing_x, particle_spacing_y,
                                particle_spacing_z):
        """
        Generate a solid lattice of nx*ny*nz points.
        """
        n = nx*ny*nz
        x = numpy.zeros(n, dtype=float)
        y = numpy.zeros(n, dtype=float)
        z = numpy.zeros(n, dtype=float)

        pindx = 0
        for i in range(nx):
            for j in range(ny):
                for k in range(nz):
                    x[pindx] = start_point.x + i*particle_spacing_x
                    y[pindx] = start_point.y + j*particle_spacing_y
                    z[pindx] = start_point.z + k*particle_spacing_z
                    pindx += 1

        return x, y, z
| [
[
8,
0,
0.0024,
0.0036,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.0073,
0.0012,
0,
0.66,
0.0714,
715,
0,
1,
0,
0,
715,
0,
0
],
[
14,
0,
0.0085,
0.0012,
0,
0.6... | [
"\"\"\"\nClasses for generators of some simple elements.\n\"\"\"",
"import logging",
"logger = logging.getLogger()",
"import numpy",
"from pysph.base.carray import DoubleArray, LongArray",
"from pysph.base.nnps import *",
"from pysph.base.point import Point",
"from pysph.solver.particle_generator impo... |
HAS_CL = True
try:
import pyopencl as cl
except ImportError:
HAS_CL=False
from os import path
import numpy
from utils import get_pysph_root
# Return all available devices on the host
def get_cl_devices():
""" Return a dictionary keyed on device type for all devices """
_devices = {'CPU':[], 'GPU':[]}
platforms = cl.get_platforms()
for platform in platforms:
devices = platform.get_devices()
for device in devices:
if device.type == cl.device_type.CPU:
_devices['CPU'].append(device)
elif device.type == cl.device_type.GPU:
_devices['GPU'].append(device)
return _devices
def create_context_from_cpu():
""" Create an OpenCL context using the CPU as the default device """
cl_devices = get_cl_devices()
if ( cl_devices['CPU'] == [] ):
raise ValueError("No CPU device found! ")
return cl.Context( devices=cl_devices['CPU'] )
def create_context_from_gpu():
""" Create an OpenCL context using the CPU as the default device """
cl_devices = get_cl_devices()
if ( cl_devices['GPU'] == [] ):
raise ValueError("No GPU device found! ")
return cl.Context( devices=cl_devices['GPU'] )
def create_some_context():
""" Create a "reasonable" context from the available devices.
Preference is given to CPU devices over GPU devices.
"""
devices = get_cl_devices()
cpu_devices = devices['CPU']
gpu_devices = devices['GPU']
if ( len( cpu_devices ) > 0 ):
context = cl.Context( devices = cpu_devices )
elif ( len( gpu_devices ) > 0 ):
context = cl.Context( devices = gpu_devices )
else:
raise ValueError("No devices found!")
return context
def iscpucontext(ctx):
"""Return True or False if the context is for a CPU device"""
for device in ctx.devices:
if device.type == cl.device_type.CPU:
return True
def isgpucontext(ctx):
for device in ctx.devices:
if device.type == cl.device_type.GPU:
return True
def get_cl_include():
""" Include directories for OpenCL definitions """
PYSPH_ROOT = get_pysph_root()
if cl.version.VERSION_TEXT == "2011.1beta3":
inc_dir = '-I'+path.join(PYSPH_ROOT, 'base') + " " + \
'-I'+path.join(PYSPH_ROOT, 'solver')
elif cl.version.VERSION_TEXT == "2011.1.1":
inc_dir = ["-I" + path.join(PYSPH_ROOT, "base"),
"-I" + path.join(PYSPH_ROOT, "solver") ]
else: # assume it is the latest version
inc_dir = ["-I" + path.join(PYSPH_ROOT, "base"),
"-I" + path.join(PYSPH_ROOT, "solver") ]
#raise RuntimeWarning("Not supported yet")
return inc_dir
def get_scalar_buffer(val, dtype, ctx):
""" Return a cl.Buffer object that can be passed as a scalar to kernels """
mf = cl.mem_flags
arr = numpy.array([val,], dtype)
return cl.Buffer(ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=arr)
def cl_read(filename, precision='double', function_name=None):
"""Read an OpenCL source file.
The function also adds a few convenient #define's so as to allow us
to write common code for both float and double precision. This is
done by specifying the `precision` argument which defaults to
'float'. The OpenCL code itself should be written to use REAL for
the type declaration. The word REAL will be #defined to change
precision on the fly. For conveinence REAL2, REAL3, REAL4 and REAL8
are all defined as well.
Parameters
----------
filename : str
Name of file to open.
precision : {'single', 'double'}, optional
The floating point precision to use.
function_name: str, optional
An optional function name to indicate a block to extract from
the OpenCL template file.
"""
if precision not in ['single', 'double']:
msg = "Invalid argument for 'precision' should be 'single'"\
" or 'double'."
raise ValueError(msg)
src = open(filename).read()
if function_name:
src = src.split('$'+function_name)[1]
if precision == 'single':
typ = 'float'
hdr = "#define F f \n"
else:
typ = 'double'
hdr = "#pragma OPENCL EXTENSION cl_khr_fp64 : enable\n"
hdr += '#define F \n'
for x in ('', '2', '3', '4', '8'):
hdr += '#define REAL%s %%(typ)s%s\n'%(x, x)
hdr = hdr%(dict(typ=typ))
return hdr + src
def get_real(val, precision):
""" Return a suitable floating point number for OpenCL.
Parameters
----------
val : float
The value to convert.
precision : {'single', 'double'}
The precision to use.
"""
if precision == "single":
return numpy.float32(val)
elif precision == "double":
return numpy.float64(val)
else:
raise ValueError ("precision %s not supported!"%(precision))
def create_program(template, func, loc=None):
""" Create an OpenCL program given a template string and function
Parameters
----------
template: str
The template source file that is read using cl_read
func: SPHFunctionParticle
The function that provides the kernel arguments to the template
loc: NotImplemented
A template is the basic outline of an OpenCL kernel for a
SPHFunctionParticle. The arguments to the kernel and neighbor
looping code needs to be provided to render it a valid OpenCL
kenrel.
"""
k_args = []
func.set_cl_kernel_args()
k_args.extend(func.cl_args_name)
# Build the kernel args string.
kernel_args = ',\n '.join(k_args)
# Get the kernel workgroup code
workgroup_code = func.get_cl_workgroup_code()
# Construct the neighbor loop code.
neighbor_loop_code = "for (int src_id=0; src_id<nbrs; ++src_id)"
return template%(locals())
def enqueue_copy(queue, src, dst):
if cl.version.VERSION_TEXT == "2011.1beta3":
if ( isinstance(dst, cl.Buffer) ):
if ( isinstance(src, cl.Buffer) ):
# device to device copy
cl.enqueue_copy_buffer(queue, src=src, dst=dst)
elif ( isinstance(src, numpy.ndarray) ):
# host to device copy
cl.enqueue_write_buffer(queue, mem=dst, hostbuf=src)
elif ( isinstance(src, cl.Buffer) ):
cl.enqueue_read_buffer(queue, mem=src, hostbuf=dst)
elif cl.version.VERSION_TEXT == "2011.1.1":
cl.enqueue_copy(queue, dest=dst, src=src).wait()
else: # we assume that it is the latest version
cl.enqueue_copy(queue, dest=dst, src=src).wait()
queue.finish()
def round_up(n):
"""Round up 'n' to the nearest power of 2
The code here is borrowed from AMD APP SDK 2.5:
SDKCommon.cpp::roundToPowerOf2
"""
n -= 1
for i in range(8):
n |= n >> (1<<i)
n += 1
return n
def ispowerof2(val):
"""Test if the input is a power of 2"""
if( (val & (-val))-val == 0 & (val != 0)):
return True
else:
return False
def uint32mask():
"""Reserved value for 32 bit unsigned ints"""
return (1<<32) - 1
| [
[
14,
0,
0.0037,
0.0037,
0,
0.66,
0,
94,
1,
0,
0,
0,
0,
4,
0
],
[
7,
0,
0.0129,
0.0148,
0,
0.66,
0.0526,
0,
0,
1,
0,
0,
0,
0,
0
],
[
1,
1,
0.0111,
0.0037,
1,
0.67,
... | [
"HAS_CL = True",
"try:\n import pyopencl as cl\nexcept ImportError:\n HAS_CL=False",
" import pyopencl as cl",
" HAS_CL=False",
"from os import path",
"import numpy",
"from utils import get_pysph_root",
"def get_cl_devices():\n \"\"\" Return a dictionary keyed on device type for all dev... |
from numpy import arccos, sin, cos, array, sqrt, pi
r = 2.0/pi
dt = 1e-3
def force(x,y):
theta = arccos(x/sqrt((x**2+y**2)))
return array([-sin(theta), cos(theta)])
def rk2(nsteps=1000, x0=r, y0=0):
t = 0
xinitial = x0
yinitial = y0
while t < nsteps:
_x = xinitial
_y = yinitial
k1x, k1y = force(xinitial, yinitial)
xinitial = _x + 0.5*dt*k1x; yinitial = _y + 0.5*dt*k1y
k2x, k2y = force(xinitial, yinitial)
xnew = _x + (0.5*dt)*(k1x + k2x)
ynew = _y + (0.5*dt)*(k1y + k2y)
xinitial = xnew
yinitial = ynew
t += 1
pass
return xnew, ynew
def rk4(steps=1000, x0=r, y0=0):
t = 0
xinitial = x0
yinitial = y0
while t < steps:
_x = xinitial
_y = yinitial
k1x, k1y = force(xinitial, yinitial)
xinitial = _x + 0.5*dt*k1x; yinitial = _y + 0.5*dt*k1y
k2x, k2y = force(xinitial, yinitial)
xinitial =_x + 0.5*dt*k2x; yinitial = _y + 0.5*dt*k2y
k3x, k3y = force(xinitial, yinitial)
xinitial = _x + dt*k3x; yinitial = _y + dt*k3y
k4x, k4y = force(xinitial, yinitial)
xnew = _x + (dt/6.0)*(k1x + 2*k2x + 2*k3x + k4x)
ynew = _y + (dt/6.0)*(k1y + 2*k2y + 2*k3y + k4y)
xinitial = xnew
yinitial = ynew
t += 1
pass
return xnew, ynew
def euler(nsteps=1000, x0=r, y0=0):
t = 0
xinitial = x0
yinitial = y0
while t < nsteps:
k1x, k1y = dt*force(xinitial, yinitial)
xnew = xinitial + k1x
ynew = yinitial + k1y
xinitial = xnew
yinitial = ynew
t += 1
pass
return xnew, ynew
| [
[
1,
0,
0.0132,
0.0132,
0,
0.66,
0,
954,
0,
6,
0,
0,
954,
0,
0
],
[
14,
0,
0.0395,
0.0132,
0,
0.66,
0.1667,
436,
4,
0,
0,
0,
0,
0,
0
],
[
14,
0,
0.0526,
0.0132,
0,
... | [
"from numpy import arccos, sin, cos, array, sqrt, pi",
"r = 2.0/pi",
"dt = 1e-3",
"def force(x,y):\n\n theta = arccos(x/sqrt((x**2+y**2)))\n return array([-sin(theta), cos(theta)])",
" theta = arccos(x/sqrt((x**2+y**2)))",
" return array([-sin(theta), cos(theta)])",
"def rk2(nsteps=1000, x0=... |
""" An example solver for the circular patch of fluid """
import numpy
import pysph.base.api as base
import pysph.sph.api as sph
from solver import Solver
from sph_equation import SPHOperation, SPHIntegration
Fluids = base.ParticleType.Fluid
Solids = base.ParticleType.Solid
def get_circular_patch(name="", type=0, dx=0.025/1.3,
cl_precision="single", **kwargs):
x,y = numpy.mgrid[-1.05:1.05+1e-4:dx, -1.05:1.05+1e-4:dx]
x = x.ravel()
y = y.ravel()
m = numpy.ones_like(x)*dx*dx
h = numpy.ones_like(x)*2*dx
rho = numpy.ones_like(x)
p = 0.5*1.0*100*100*(1 - (x**2 + y**2))
cs = numpy.ones_like(x) * 100.0
u = -100*x
v = 100*y
indices = []
for i in range(len(x)):
if numpy.sqrt(x[i]*x[i] + y[i]*y[i]) - 1 > 1e-10:
indices.append(i)
pa = base.get_particle_array(x=x, y=y, m=m, rho=rho, h=h, p=p, u=u, v=v,
cs=cs,name=name, type=type,
cl_precision=cl_precision)
la = base.LongArray(len(indices))
la.set_data(numpy.array(indices))
pa.remove_particles(la)
pa.set(idx=numpy.arange(len(pa.x)))
print 'Number of particles: ', len(pa.x)
return pa
class FluidSolver(Solver):
def setup_solver(self, options=None):
#create the sph operation objects
self.add_operation(SPHOperation(
sph.TaitEquation.withargs(co=100.0, ro=1.0),
on_types=[Fluids],
updates=['p', 'cs'],
id='eos')
)
self.add_operation(SPHIntegration(
sph.SPHDensityRate.withargs(hks=False),
from_types=[Fluids], on_types=[Fluids],
updates=['rho'],
id='density')
)
self.add_operation(SPHIntegration(
sph.MomentumEquation.withargs(alpha=0.01, beta=0.0, hks=False),
from_types=[Fluids], on_types=[Fluids],
updates=['u','v','w'],
id='mom')
)
self.add_operation_step([Fluids])
self.add_operation_xsph(eps=0.1, hks=False)
#############################################################################
| [
[
8,
0,
0.023,
0.0115,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.046,
0.0115,
0,
0.66,
0.1111,
954,
0,
1,
0,
0,
954,
0,
0
],
[
1,
0,
0.069,
0.0115,
0,
0.66,
... | [
"\"\"\" An example solver for the circular patch of fluid \"\"\"",
"import numpy",
"import pysph.base.api as base",
"import pysph.sph.api as sph",
"from solver import Solver",
"from sph_equation import SPHOperation, SPHIntegration",
"Fluids = base.ParticleType.Fluid",
"Solids = base.ParticleType.Solid... |
from integrator import EulerIntegrator, RK2Integrator, RK4Integrator,\
PredictorCorrectorIntegrator, LeapFrogIntegrator
from cl_integrator import CLEulerIntegrator
from sph_equation import SPHIntegration, SPHOperation
from solver import Solver
from shock_tube_solver import ShockTubeSolver, ADKEShockTubeSolver,\
MonaghanShockTubeSolver, GSPHShockTubeSolver
from fluid_solver import FluidSolver, get_circular_patch
import shock_tube_solver, fluid_solver
from basic_generators import LineGenerator, CuboidGenerator, RectangleGenerator
from particle_generator import DensityComputationMode, MassComputationMode, \
ParticleGenerator
from application import Application
from post_step_functions import SaveCellManagerData
from plot import ParticleInformation
from utils import savez, savez_compressed, get_distributed_particles, mkdir, \
get_pickled_data, get_pysph_root, load
from cl_utils import HAS_CL, get_cl_devices, get_cl_include, \
get_scalar_buffer, cl_read, get_real, create_program,\
create_context_from_cpu, create_context_from_gpu, create_some_context,\
enqueue_copy, round_up, uint32mask
from time_step_functions import ViscousTimeStep, ViscousAndForceBasedTimeStep,\
VelocityBasedTimeStep
| [
[
1,
0,
0.0395,
0.0526,
0,
0.66,
0,
352,
0,
5,
0,
0,
352,
0,
0
],
[
1,
0,
0.1053,
0.0263,
0,
0.66,
0.0714,
91,
0,
1,
0,
0,
91,
0,
0
],
[
1,
0,
0.1579,
0.0263,
0,
0.... | [
"from integrator import EulerIntegrator, RK2Integrator, RK4Integrator,\\\n PredictorCorrectorIntegrator, LeapFrogIntegrator",
"from cl_integrator import CLEulerIntegrator",
"from sph_equation import SPHIntegration, SPHOperation",
"from solver import Solver",
"from shock_tube_solver import ShockTubeSolver... |
#! /usr/bin/env python
# Author: Stefan Behnel <scoder@users.berlios.de>
# http://hg.cython.org/cython-devel/file/tip/Tools/cython-epydoc.py
#
# --------------------------------------------------------------------
import re
from epydoc import docstringparser as dsp
CYTHON_SIGNATURE_RE = re.compile(
# Class name (for builtin methods)
r'^\s*((?P<class>\w+)\.)?' +
# The function name
r'(?P<func>\w+)' +
# The parameters
r'\(((?P<self>(?:self|cls|mcs)),?)?(?P<params>.*)\)' +
# The return value (optional)
r'(\s*(->)\s*(?P<return>\w+(?:\s*\w+)))?' +
# The end marker
r'\s*(?:\n|$)')
parse_signature = dsp.parse_function_signature
def parse_function_signature(func_doc, doc_source,
docformat, parse_errors):
PYTHON_SIGNATURE_RE = dsp._SIGNATURE_RE
assert PYTHON_SIGNATURE_RE is not CYTHON_SIGNATURE_RE
try:
dsp._SIGNATURE_RE = CYTHON_SIGNATURE_RE
found = parse_signature(func_doc, doc_source,
docformat, parse_errors)
dsp._SIGNATURE_RE = PYTHON_SIGNATURE_RE
if not found:
found = parse_signature(func_doc, doc_source,
docformat, parse_errors)
return found
finally:
dsp._SIGNATURE_RE = PYTHON_SIGNATURE_RE
dsp.parse_function_signature = parse_function_signature
# --------------------------------------------------------------------
from epydoc.cli import cli
cli()
# --------------------------------------------------------------------
| [
[
1,
0,
0.1489,
0.0213,
0,
0.66,
0,
540,
0,
1,
0,
0,
540,
0,
0
],
[
1,
0,
0.1702,
0.0213,
0,
0.66,
0.1429,
173,
0,
1,
0,
0,
173,
0,
0
],
[
14,
0,
0.3191,
0.234,
0,
... | [
"import re",
"from epydoc import docstringparser as dsp",
"CYTHON_SIGNATURE_RE = re.compile(\n # Class name (for builtin methods)\n r'^\\s*((?P<class>\\w+)\\.)?' +\n # The function name\n r'(?P<func>\\w+)' +\n # The parameters\n r'\\(((?P<self>(?:self|cls|mcs)),?)?(?P<params>.*)\\)' +\n # T... |
#!/usr/bin/env python
#
# Copyright 2011 Google Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Settings for the Au-to-do app."""
TEMPLATE_BASE_PATH = 'templates/'
# Prediction API credentials keyname.
CREDENTIALS_KEYNAME = 'prediction_credentials'
# OAuth 2.0 related constant.
CLIENT_ID = (
'your_client_id'
)
CLIENT_SECRET = 'your_client_secret'
# TODO(user): Make sure that all the scopes are included.
SCOPES = ['https://www.googleapis.com/auth/prediction']
USER_AGENT = 'au-to-do'
DOMAIN = 'anonymous'
# Whether or not to use memcache for caching of JSON models.
USE_MEMCACHE_FOR_JSON_MODELS = True
MEMCACHE_VERSION_PREFIX = '1-'
# Google Interoperable Access
GS_INTEROPERABLE_ACCESS = 'your_legacy_access_key'
GS_INTEROPERABLE_SECRET = 'your_legacy_access_secret'
GS_BUCKET = 'autodo-predictionmodels'
| [
[
8,
0,
0.3864,
0.0227,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
14,
0,
0.5,
0.0227,
0,
0.66,
0.0833,
884,
1,
0,
0,
0,
0,
3,
0
],
[
14,
0,
0.5682,
0.0227,
0,
0.66,
... | [
"\"\"\"Settings for the Au-to-do app.\"\"\"",
"TEMPLATE_BASE_PATH = 'templates/'",
"CREDENTIALS_KEYNAME = 'prediction_credentials'",
"CLIENT_ID = (\n 'your_client_id'\n)",
"CLIENT_SECRET = 'your_client_secret'",
"SCOPES = ['https://www.googleapis.com/auth/prediction']",
"USER_AGENT = 'au-to-do'",
"... |
#!/usr/bin/env python
| [] | [] |
#!/usr/bin/env python
#
# Copyright 2011 Google Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime
import logging
import re
import string
import StringIO
import unicodedata
from apiclient import errors
from apiclient.discovery import build
from oauth2client import client
from google.appengine.ext import db
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
import gslite
import httplib2
import model
import settings
def CleanText(text, quote=False):
"""Transform a string into a string of terms suitable for a training set.
The Prediction API treats each word as a separate term, so make all
words lower case and remove all punctuation. This is one area where
experimenting with pre-processing can yield different prediction
fidelity, so it is likely that this function should be updated for
specialized parsing.
This implementation converts everything to ASCII.
Args:
text: A string to be cleaned.
quote: True if you want the results to be quoted.
Returns:
A string suitable for use in a .csv
"""
to_remove = string.whitespace + string.punctuation
text = unicodedata.normalize('NFKD', text).encode('ascii', 'replace')
replace = re.compile('[%s]' % re.escape(to_remove))
new_text = replace.sub(' ', text)
new_text = re.sub(' +', ' ', new_text)
new_text = new_text.lower()
if quote:
new_text = '\"%s\"' % new_text
return new_text
def ConcatenateMessages(incident):
"""Find all the Messages attached to an Incident and return bodies.
Args:
incident: Incident instance which is the parent of the Messages.
Returns:
A string, possibly very long, containing all the text from all the
Messages attached to an Incident.
"""
messages = model.Message.gql('WHERE incident = :1', incident.key())
return ' '.join(message.body for message in messages)
def RefreshTagsAndModels():
"""Update all the Tags and SuggestionModel.ui_tags."""
incidents = model.Incident.all()
for incident in incidents:
model.Tag.CreateMissingTags(incident)
tags = model.Tag.all()
for tag in tags:
model.SuggestionModel.CreateMissingModel(tag.name)
def BuildCSVRow(incident, tag=None, recycled=None):
"""Create an example row suitable for a training CSV file or streaming.
This incident makes some expensive calls on text processing and
data retrieval, so it returns all of the processed data in an
"opaque" dictionary. You can optionally pass this dictionary back
in to the function through recycled if you would like to save on
processing.
Args:
incident: a model.Incident to parse.
tag: String form of the tag name. If present it will always be
the first element of the returned string (per the Prediction
API training format).
recycled: "opaque" dictionary (for use only by this
function). Modified by reference, you should pass this object
back in if you are processing the same incident for multiple
tags.
Returns:
String suitable for a row in the prediction (no tag) or
training stream (with tag).
"""
if not recycled:
recycled = {}
if 'body' not in recycled:
recycled['body'] = CleanText(ConcatenateMessages(incident))
recycled['title'] = CleanText(incident.title)
row_items = [recycled['title'], recycled['body']]
if tag:
# Tag should not be CleanText'd because it must match exactly.
row_items.insert(0, tag)
return ','.join('\"%s\"' % item for item in row_items)
def BuildCSVTrainingSet(model_name, write_file, tag_counts, training=False):
"""Create a training set containing every example of a model.
Args:
model_name: String form of the model name.
write_file: IO-based object with 'write' method.
tag_counts: Dictionary of tags and a count of their examples.
training: True if this CSV will be used immediately for training,
to update the incident statistics
Returns:
Tuple containing:
* the set of tags added to the training set.
* the list of incicdent used for training.
"""
added_tags = set()
trained_incidents = []
model_tags = []
tags = model.Tag.all()
for tag in tags:
if model.Tag.ModelMatches(model_name, tag.name):
model_tags.append(tag.name)
incidents = model.Incident.all()
# TODO(user) Add a filter to debounce incidents which have been
# updated recently. The user may still be making changes.
# Note: "IN" queries are limited to 30 list elements (sub-queries)!
if len(model_tags) > 30:
logging.error('There are too many tags in %s to query with a single IN.',
model_name)
incidents.filter('accepted_tags IN', model_tags)
for incident in incidents:
processed_incident = {}
for tag in incident.accepted_tags:
if tag in model_tags:
added_tags.add(tag)
if training:
incident.trained_tags.append(tag)
write_file.write(BuildCSVRow(incident, tag=tag,
recycled=processed_incident))
write_file.write('\n')
if tag in tag_counts:
tag_counts[tag] += 1
else:
tag_counts[tag] = 1
trained_incidents.append(incident)
if training:
incident.trained_tags = list(set(incident.trained_tags))
incident.updated = datetime.utcnow()
incident.trained_date = incident.updated
# incident.training_review should remain unchanged because we
# only checked one model. This incident may belong to multiple
# models, some of which have already been trained.
return (added_tags, trained_incidents)
class Suggester(webapp.RequestHandler):
"""Learn and suggest tags for Incidents.
Learn from user-provided tags ("Accepted" tags)
and suggest tags for Incidents as Messages arrive.
"""
def _SuggestTags(self, key, service):
"""Get suggestions for tags from the Prediction API.
Updates the Incident with one suggested tag for each model.
Args:
key: Model Key for Incident to receive suggested tags.
service: Built API service class, pre-authorized for OAuth.
"""
incident = db.get(key)
if not incident:
logging.error('_SuggestTags: No Incident with id=' + key)
else:
csv_instance = BuildCSVRow(incident)
sample = {'input': {'csvInstance': [csv_instance]}}
model_list = model.SuggestionModel.all()
suggested = []
for suggestion_model in model_list:
if suggestion_model.training_examples:
prediction = service.trainedmodels().predict(
id=suggestion_model.training_file, body=sample).execute()
logging.info('Model:%s Prediction=%s', suggestion_model.name,
prediction)
# Only add labels that are not already assigned to the incident
if prediction['outputLabel'] not in incident.accepted_tags:
suggested.append(prediction['outputLabel'])
if suggested:
incident.suggested_tags = suggested
logging.info('_SuggestTags: Final Suggestions=%s', ','.join(suggested))
incident.PurgeJsonCache()
incident.updated = datetime.utcnow()
incident.put()
model.Tag.CreateMissingTags(incident)
def post(self):
"""Handle a POST request by returning suggestions from the prediction API.
POST Parameters:
incident_key: String form of Incident Key.
Returns:
Nothing. Modifies Incident.suggested_tags.
"""
logging.info('Suggester.post')
incident_key = self.request.get('incident_key')
if not incident_key:
logging.error('No incident_key provided')
return
else:
incident_key = db.Key(incident_key)
credentials = model.Credentials.get_by_key_name(
settings.CREDENTIALS_KEYNAME)
if credentials:
credentials = credentials.credentials
http = httplib2.Http()
http = credentials.authorize(http)
service = build('prediction', 'v1.4', http=http)
self._SuggestTags(incident_key, service)
class Trainer(webapp.RequestHandler):
"""Make Examples and train the Prediction Engine from the Examples."""
def _UpdateTraining(self, training):
"""Update the Prediction API training model with new models and examples.
Args:
training: The Prediction API training service, already authorized.
"""
trained_model_query = db.GqlQuery('SELECT * FROM SuggestionModel '
'WHERE training_examples > 0')
trained_model_names = {}
for trained_model in trained_model_query:
trained_model_names[trained_model.name] = trained_model.training_file
logging.info('TRAINED MODEL=%s', trained_model.name)
# Note on Query design: I originally wanted to select where
# updated>trained, but the right value (trained) cannot be another
# column in the Incident, it must be a constant. Instead I
# created a new field, training_review, which is True when
# training should look at the Incident for changes and False when
# the Incident has been processed.
# TODO(user): optimize training_review so that it is only set
# when tags change. Right now it is set whenever the Incident is
# updated.
updated_incidents = db.GqlQuery('SELECT * FROM Incident '
'WHERE training_review = TRUE')
for updated_incident in updated_incidents:
if updated_incident.title:
logging.info('UPDATED INCIDENT = ' + updated_incident.title)
processed_incident = {}
new_tags = (set(updated_incident.accepted_tags) -
set(updated_incident.trained_tags))
for new_tag in new_tags:
new_tag_model = model.Tag.ModelCategory(new_tag)['model']
if new_tag_model in trained_model_names:
example = BuildCSVRow(updated_incident, tag=new_tag,
recycled=processed_incident)
logging.info('%s\n\tROW = %s', trained_model_names[new_tag_model],
example)
current_model = model.SuggestionModel.get_by_key_name(new_tag_model)
gs_full_name = '%s/%s' % (settings.GS_BUCKET,
current_model.training_file)
csv_instance = {'label': new_tag, 'csvInstance': [example]}
# TODO(user) Check training result for success.
try:
training.update(
id=current_model.training_file, body=csv_instance).execute()
updated_incident.trained_tags.append(new_tag)
except errors.HttpError, error:
if 'Training running' not in error.content:
# Trained model insert failed, reset the training status for this
# tag.
logging.error(
'Failed to retrieve trained model %s', new_tag_model)
current_model.training_examples = 0
current_model.put()
except client.AccessTokenRefreshError:
logging.error('Failed to update training set %s', gs_full_name)
updated_incident.trained_tags = list(set(updated_incident.trained_tags))
updated_incident.training_review = False
updated_incident.put()
# Go through the untrained models second because they can ignore the
# training_review flag.
untrained_models = db.GqlQuery('SELECT * FROM SuggestionModel '
'WHERE training_examples = 0')
storage = gslite.GsClient(access_key=settings.GS_INTEROPERABLE_ACCESS,
secret=settings.GS_INTEROPERABLE_SECRET)
tag_counts = {}
for untrained_model in untrained_models:
logging.info('UNTRAINED MODEL = ' + untrained_model.name)
string_file = StringIO.StringIO()
tags, trained_incidents = BuildCSVTrainingSet(
untrained_model.name, string_file, tag_counts, training=True)
if len(tags) > 1:
gs_object_name = untrained_model.name
gs_full_name = '%s/%s.csv' % (settings.GS_BUCKET, gs_object_name)
body = {
'id': gs_object_name,
'storageDataLocation': gs_full_name
}
storage.put_object(
settings.GS_BUCKET, gs_object_name + '.csv', string_file,
extra_headers={'x-goog-acl': 'project-private'})
string_file.close()
# TODO(user) check result for success
training.insert(body=body).execute()
untrained_model.training_file = gs_object_name
untrained_model.training_date = datetime.utcnow()
untrained_model.training_examples = len(trained_incidents)
untrained_model.training_tags = tag_counts.keys()
untrained_model.put()
for incident in trained_incidents:
incident.put()
# Update the statistics in the related Tag
for tag in tag_counts:
tag_object = model.Tag.get_by_key_name(tag)
tag_object.example_count = tag_counts[tag]
tag_object.trained_count = tag_counts[tag]
tag_object.trained_date = datetime.utcnow()
tag_object.put()
def _DownloadCSV(self, model_name):
"""Generate a csv file suitable for use as a training set.
Provides download file and updates Tags in datastore.
Args:
model_name: model.name. All Accepted tags for this model will be
processed to create one training set.
"""
now = datetime.utcnow()
suggestion_model = model.SuggestionModel.get_by_key_name(model_name)
suggestion_model.export_file = '%s-%s.csv' % (model_name, now.isoformat())
disposition = 'attachment; filename=%s' % suggestion_model.export_file
self.response.headers['Content-Type'] = 'text/csv'
self.response.headers['Content-Disposition'] = disposition
tag_counts = {}
temp_file = StringIO.StringIO()
_, trained_incidents = BuildCSVTrainingSet(model_name, temp_file,
tag_counts)
self.response.out.write(temp_file.getvalue())
temp_file.close()
# Update the statistics in the related Tag
for tag in tag_counts:
tag_object = model.Tag.get_by_key_name(tag)
tag_object.example_count = tag_counts[tag]
tag_object.put()
# Update the statistics in the SuggestionModel
suggestion_model.export_date = now
suggestion_model.export_tags = tag_counts.keys()
suggestion_model.ui_tags = suggestion_model.export_tags
suggestion_model.export_examples = len(trained_incidents)
suggestion_model.put()
def _Refresh(self):
"""Force a new training set for all tags."""
RefreshTagsAndModels()
credentials = model.Credentials.get_by_key_name(
settings.CREDENTIALS_KEYNAME)
if credentials:
credentials = credentials.credentials
http = httplib2.Http()
http = credentials.authorize(http)
service = build('prediction', 'v1.4', http=http)
train = service.trainedmodels()
self._UpdateTraining(train)
def get(self):
"""Private endpoint for Cron job automatic training."""
if self.request.headers.get('X-AppEngine-Cron') == 'true':
logging.info('Refreshing tags training set from Cron job.')
self._Refresh()
else:
self.redirect('/')
def post(self):
"""Process requests to train or for training data.
Possible requests:
action=refresh: force a new training set for all tags with sufficient
Examples. Creates models as needed.
action=csv: download a comma-separated version of the given model.
"""
action = self.request.get('action')
model_name = self.request.get('model_name')
if action == 'csv':
self._DownloadCSV(model_name)
elif action == 'refresh':
self._Refresh()
self.redirect('/')
def main():
run_wsgi_app(webapp.WSGIApplication([
('/tasks/train', Trainer),
('/tasks/suggest', Suggester)]))
if __name__ == '__main__':
main()
| [
[
1,
0,
0.0448,
0.0022,
0,
0.66,
0,
426,
0,
1,
0,
0,
426,
0,
0
],
[
1,
0,
0.0471,
0.0022,
0,
0.66,
0.0417,
715,
0,
1,
0,
0,
715,
0,
0
],
[
1,
0,
0.0493,
0.0022,
0,
... | [
"from datetime import datetime",
"import logging",
"import re",
"import string",
"import StringIO",
"import unicodedata",
"from apiclient import errors",
"from apiclient.discovery import build",
"from oauth2client import client",
"from google.appengine.ext import db",
"from google.appengine.ext ... |
#!/usr/bin/env python
#
# Copyright 2011 Google Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configures all page handlers for the application."""
from google.appengine.dist import use_library
use_library('django', '1.2')
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
from landing import LandingPage
from user_settings import UserSettingsPage
application = webapp.WSGIApplication(
[
('/', LandingPage),
('/settings', UserSettingsPage),
],
debug=True)
def main():
"""Runs the application."""
run_wsgi_app(application)
if __name__ == '__main__':
main()
| [
[
8,
0,
0.3778,
0.0222,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.4889,
0.0222,
0,
0.66,
0.1111,
813,
0,
1,
0,
0,
813,
0,
0
],
[
8,
0,
0.5111,
0.0222,
0,
0.66... | [
"\"\"\"Configures all page handlers for the application.\"\"\"",
"from google.appengine.dist import use_library",
"use_library('django', '1.2')",
"from google.appengine.ext import webapp",
"from google.appengine.ext.webapp.util import run_wsgi_app",
"from landing import LandingPage",
"from user_settings... |
#!/usr/bin/env python
#
# Copyright 2011 Google Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Datastore reset page for the application."""
from datetime import datetime
import re
from google.appengine.api import users
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
import model
class DatastoreResetPage(webapp.RequestHandler):
"""Handler for datastore reset requests."""
def post(self):
"""Resets the datastore."""
# Ignore if not local.
if not re.search('(appspot)', self.request.host):
for incident in model.Incident.all():
incident.delete()
for message in model.Message.all():
message.delete()
user = users.get_current_user()
# Create an incident for the user.
self.CreateIncident('Incident for ' + user.nickname(), user.nickname())
# Creates an unassigned incident.
self.CreateIncident('Unassigned incident')
# Creates an incident assigned to 'some_user' if one doesn't exist.
if user.nickname() is not 'some_user':
self.CreateIncident('Incident for some_user',
owner='some_user')
# Creates an incident with the accepted tag of 'API-Test'.
self.CreateIncident('API-Test', accepted_tags=['API-Test'])
# Creates an incident with the accepted tag of 'Special-ToAssignTag'.
self.CreateIncident('To assign tag',
accepted_tags=['Special-ToAssignTag'])
# Creates an incident to be resolved.
self.CreateIncident('To resolve', accepted_tags=['Special-ToResolve'])
# Creates a resolved incident.
self.CreateIncident('Resolved', status='resolved')
def CreateIncident(self, title, owner='none', accepted_tags=None,
suggested_tags=None, status='new'):
"""Creates an incident with limited customization.
Args:
title: Title of the incident
owner: Optionally specifies the owner of the incident.
accepted_tags: Optional list of accepted_tags applied to the incident.
suggested_tags: Optional list of suggested_tags applied to the incident.
status: Optional string status for the new incident.
"""
# Set empty tags outside of the default constructor, in case we ever need
# to modify these later.
if not accepted_tags:
accepted_tags = []
if not suggested_tags:
suggested_tags = []
incident = model.Incident()
incident.title = title
incident.created = datetime.now()
incident.status = status
incident.owner = owner
incident.author = 'test@example.com'
incident.mailing_list = 'support@example.com'
incident.canonical_link = 'http://google.com'
incident.suggested_tags = suggested_tags
incident.accepted_tags = accepted_tags
incident.put()
self.CreateMessages(incident)
def CreateMessages(self, incident):
"""Creates messages associated with the supplied incident.
Args:
incident: Incident to which messages should be appended.
"""
in_reply_to = None
for j in range(2):
message = model.Message()
message.title = 'Message #' + str(j)
message.incident = incident
message.in_reply_to = in_reply_to
message.message_id = 'message-%s-%s' % (incident.key, str(j))
message.author = 'text@example.com'
message.body = 'Text'
message.sent = datetime.now()
message.mailing_list = 'support@example.com'
message.canonical_link = 'http://google.com'
message.put()
in_reply_to = message.message_id
application = webapp.WSGIApplication(
[
('/ds_reset', DatastoreResetPage)
],
debug=True)
def main():
"""Runs the application."""
run_wsgi_app(application)
if __name__ == '__main__':
main()
| [
[
8,
0,
0.1269,
0.0075,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.1567,
0.0075,
0,
0.66,
0.1,
426,
0,
1,
0,
0,
426,
0,
0
],
[
1,
0,
0.1642,
0.0075,
0,
0.66,
... | [
"\"\"\"Datastore reset page for the application.\"\"\"",
"from datetime import datetime",
"import re",
"from google.appengine.api import users",
"from google.appengine.ext import webapp",
"from google.appengine.ext.webapp.util import run_wsgi_app",
"import model",
"class DatastoreResetPage(webapp.Requ... |
#!/usr/bin/env python
#
# Copyright 2011 Google Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provides utility functions for Tasks API."""
import logging
import traceback
from apiclient import errors
from apiclient.discovery import build
from oauth2client.appengine import StorageByKeyName
import httplib2
import model
def _BuildClient(credentials):
"""Build a Tasks client.
Args:
credentials: Credentials used to authorized requests.
Returns:
Tasks API client.
"""
http = httplib2.Http()
if credentials:
http = credentials.authorize(http)
return build('tasks', 'v1', http=http)
def _GetCredentialsAndSettings(user_email):
"""Retrieve the user's credentials and settings.
Args:
user_email: Email of the user to retrieve settings and credentials for.
Returns:
User's credentials and settings as a tuple.
"""
settings = model.UserSettings.get_by_key_name(user_email)
credentials = None
if settings:
credentials = StorageByKeyName(
model.UserSettings, user_email, 'tasks_credentials').get()
return credentials, settings
def AddTask(incident, client=None):
"""Retrieve the owner's settings and add a task if requested.
Args:
incident: Incident to add the task for.
client: TasksClient to use for Tasks API requests.
"""
if incident.owner and incident.owner != 'none':
credentials, settings = _GetCredentialsAndSettings(incident.owner)
if credentials and settings.add_to_tasks:
client = client or _BuildClient(credentials)
key_name = '%s' % incident.key().id()
incident_task = model.IncidentTask.get_by_key_name(key_name)
body = incident.ToTaskDict()
try:
task = client.tasks().insert(
tasklist=settings.task_list_id, body=body).execute()
except errors.HttpError:
logging.error(
'Exception occured inserting task for incident %s',
incident.key().id())
logging.error(traceback.format_exc()[:-1])
if incident_task:
# Task could not be added, remove reference.
incident_task.delete()
else:
if not incident_task:
incident_task = model.IncidentTask(key_name=key_name)
incident_task.task_id = task['id']
incident_task.task_list_id = settings.task_list_id
incident_task.owner = incident.owner
incident_task.put()
def RemoveTask(incident, client=None):
"""Retrieve the owner's settings and delete the incdent's task if existing.
Args:
incident: Incident to remove the task for.
client: TasksClient to use for Tasks API requests.
"""
incident_task = model.IncidentTask.get_by_key_name(
'%s' % incident.key().id())
if incident_task:
credentials, settings = _GetCredentialsAndSettings(incident_task.owner)
if credentials and settings.add_to_tasks:
client = client or _BuildClient(credentials)
try:
client.tasks().delete(
tasklist=incident_task.task_list_id,
task=incident_task.task_id).execute()
except errors.HttpError:
logging.error(
'Exception occured while deleting task %s - %s',
incident_task.task_list_id, incident_task.task_id)
logging.error(traceback.format_exc()[:-1])
else:
logging.warning(
'No owner or credentials found for IncidentTask %s',
incident.key().id())
incident_task.delete()
else:
logging.warning(
'No IncidentTask found for incident %s', incident.key().id())
def UpdateTask(incident, old_client=None, new_client=None):
"""Update Task information on an updated incident.
Args:
incident: New version of the incident.
old_client: TasksClient to use for Tasks API requests.
new_client: TasksClient to use for Tasks API requests.
"""
incident_task = model.IncidentTask.get_by_key_name(
'%s' % incident.key().id())
if not incident_task:
AddTask(incident, new_client)
else:
old_credentials = _GetCredentialsAndSettings(incident_task.owner)[0]
old_client = old_client or _BuildClient(old_credentials)
if incident_task.owner == incident.owner:
if old_credentials:
try:
old_task = old_client.tasks().get(
tasklist=incident_task.task_list_id,
task=incident_task.task_id).execute()
old_task = incident.ToTaskDict(old_task)
old_client.tasks().update(
tasklist=incident_task.task_list_id, task=incident_task.task_id,
body=old_task).execute()
except errors.HttpError:
logging.error(
'Exception occured while retrieving or updating task %s - %s',
incident_task.task_list_id, incident_task.task_id)
logging.error(traceback.format_exc()[:-1])
else:
logging.warning(
'No credentials found for IncidentTask #%s',
incident_task.key().id())
incident_task.delete()
else:
# If the owner changed, delete the task for the previous owner.
if old_credentials:
try:
old_client.tasks().delete(
tasklist=incident_task.task_list_id,
task=incident_task.task_id).execute()
except errors.HttpError:
logging.error(
'Exception occured while deleting task %s - %s',
incident_task.task_list_id, incident_task.task_id)
logging.error(traceback.format_exc()[:-1])
else:
logging.warning(
'No credentials found for IncidentTask #%s',
incident_task.key().id())
new_credentials, new_settings = _GetCredentialsAndSettings(
incident.owner)
if new_credentials and new_settings.add_to_tasks:
AddTask(incident, new_client)
else:
incident_task.delete()
| [
[
8,
0,
0.0934,
0.0055,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.1154,
0.0055,
0,
0.66,
0.0833,
715,
0,
1,
0,
0,
715,
0,
0
],
[
1,
0,
0.1209,
0.0055,
0,
0.66... | [
"\"\"\"Provides utility functions for Tasks API.\"\"\"",
"import logging",
"import traceback",
"from apiclient import errors",
"from apiclient.discovery import build",
"from oauth2client.appengine import StorageByKeyName",
"import httplib2",
"import model",
"def _BuildClient(credentials):\n \"\"\"B... |
#!/usr/bin/env python
#
# Copyright 2010 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
"""Simple, extendable, mockable Python client for Google Storage.
This module only depends on standard Python libraries. It is intended to provide
a set of base client classes with all critical features implemented. Advanced
features can be added by extending the classes. Or, it can be used as-is.
Installation:
Put this script in your python path.
Usage:
1) Get a Google Storage account and credentials.
2) Put this script in your Python path.
2) Decide how you will store your credentials (private file, environment
variables, etc...).
3) Create a GsClient or child instance, passing credentials to constructor.
4) Use the relevant functions on the client
URL Encoding:
Users of this module do not need to URL encode/decode any request arguments
or response results.
Object names and query parameters may contain characters that are illegal
URL characters. So, all object name and query parameter values are
percent encoded by this module before sending the request. This is important
to understand since you do not want to encode your strings twice.
It is also important to understand that all object names and prefixes
found in ListBucketResult responses will not be encoded.
Handling Errors:
Google Storage service errors will be raised as GsError exceptions.
Other connection errors may get raised as httplib.HTTPException errors.
Windows Considerations:
When opening files, you must specify binary mode, like this:
infile = open(filename, 'rb')
outfile = open(filename, 'wb')
Example where credentials are in GS_ACCESS and GS_SECRET env vars:
$ python
>>> import os
>>> import gslite
>>> gs_access = os.environ['GS_ACCESS']
>>> gs_secret = os.environ['GS_SECRET']
>>> bucket = 'my_super_cool_bucket_name'
>>> filename = 'hello.txt'
>>> client = gslite.GsClient(access_key=gs_access, secret=gs_secret)
>>> client.put_bucket(bucket)
>>> infile = open(filename)
>>> client.put_object(bucket, filename, infile)
>>> infile.close()
>>> client.get_bucket(bucket).get_keys()
['hello.txt']
>>> client.delete_object(bucket, filename)
>>> client.delete_bucket(bucket)
"""
__version__ = '1.0'
import base64
import hashlib
import hmac
import httplib
import logging
import os
import StringIO
import time
import urllib
import urlparse
import xml.dom.minidom
# Success and retryable status codes.
REDIRECT_CODES = (301, 302, 303, 307)
DEFAULT_SUCCESS_CODES = (200,)
DEFAULT_RETRYABLE_CODES = (408, 500, 502, 503, 504)
GET_OBJECT_SUCCESS_CODES = (200, 206)
DEL_BUCKET_SUCCESS_CODES = (204,)
DEL_BUCKET_RETRYABLE_CODES = (404, 408, 409, 500, 502, 503, 504)
DEL_OBJECT_SUCCESS_CODES = (204,)
class GsError(Exception):
"""Base error for all client errors.
Instance data:
msg: error message
operations: list of operations associated with error
"""
def __init__(self, msg, operations):
"""GsError constructor.
Args:
msg: message string
operations: list of operations associated with error.
"""
self.msg = msg
self.operations = operations
def __str__(self):
"""Convert instance to loggable string."""
s = StringIO.StringIO()
s.write('GsError: %s' % self.msg)
for i in xrange(len(self.operations)):
s.write('\n\nOPERATION %d:' % i)
s.write('\n%s' % self.operations[i])
return s.getvalue()
class GsXmlBase(object):
"""Base XML oject parser/generator."""
@staticmethod
def value_from_elems(elems):
"""Returns a child node text value in the last element in elems.
Args:
elems: A list of Element objects from the xml.dom.minidom module.
Returns:
String value of last node or empty string if not found.
"""
ret = ''
if elems:
child_nodes = elems[-1].childNodes
if child_nodes:
ret = child_nodes[-1].nodeValue
return str(ret)
@staticmethod
def add_text_node(dom, parent_node, node_name, node_text):
"""Adds a simple text node to a parent node.
Args:
dom: dom object from xml.dom.minidom module.
parent_node: Parent Node object from the xml.dom.minidom module.
node_name: Name of new child node
node_text: Text content of new node.
"""
elem = dom.createElement(node_name)
text = dom.createTextNode(node_text)
elem.appendChild(text)
parent_node.appendChild(elem)
class GsAccessControlList(GsXmlBase):
"""AccessControlList XML parser/generator.
See the Google Storage API documentation for more information about the
AccessControlList XML specification.
Instance data:
owner_id: owner google storage id as string
owner_name: owner name as string
entries: list of GsAccessControlList.Entry instances
"""
class Entry(object):
"""Entry class corresponding to like named element.
Instance data:
permission: permission as string ('READ', 'WRITE', etc...)
scope_type: scope type as string ('UserById', etc...)
scope_user_id: scope user google storage id as string
scope_user_name: scope user name as string
scope_email: scope user email address as string
scope_domain: scope domain as string
"""
def __init__(self,
permission='',
scope_type='',
scope_user_id='',
scope_user_name='',
scope_email='',
scope_domain=''):
"""Entry Constructor.
Args:
permission: permission as string ('READ', 'WRITE', etc...)
scope_type: scope type as string ('UserById', etc...)
scope_user_id: scope user google storage id as string
scope_user_name: scope user name as string
scope_email: scope user email address as string
scope_domain: scope domain as string
"""
self.permission = permission
self.scope_type = scope_type
self.scope_user_id = scope_user_id
self.scope_user_name = scope_user_name
self.scope_email = scope_email
self.scope_domain = scope_domain
def __init__(self, owner_id='', owner_name=''):
"""GsAccessControlList Constructor.
Args:
owner_id: owner google storage id as string
owner_name: owner name as string
"""
self.owner_id = owner_id
self.owner_name = owner_name
self.entries = []
def add_entry(self,
permission='',
scope_type='',
scope_user_id='',
scope_user_name='',
scope_email='',
scope_domain=''):
"""Adds an entry to the acl.
Args:
permission: permission as string ('READ', 'WRITE', etc...)
scope_type: scope type as string ('UserById', etc...)
scope_user_id: scope user google storage id as string
scope_user_name: scope user name as string
scope_email: scope user email address as string
scope_domain: scope domain as string
"""
self.entries.append(GsAccessControlList.Entry(
permission=permission,
scope_type=scope_type,
scope_user_id=scope_user_id,
scope_user_name=scope_user_name,
scope_email=scope_email,
scope_domain=scope_domain))
def parse_xml(self, xml_str):
"""Parses the given xml string to this object.
Args:
xml_str: AccessControlList XML as string
"""
self.owner_id = ''
self.owner_name = ''
self.entries = []
dom = xml.dom.minidom.parseString(xml_str)
owner_elems = dom.getElementsByTagName('Owner')
for owner_elem in owner_elems:
self.owner_id = self.value_from_elems(
owner_elem.getElementsByTagName('ID'))
self.owner_name = self.value_from_elems(
owner_elem.getElementsByTagName('Name'))
entries_elems = dom.getElementsByTagName('Entries')
for entries_elem in entries_elems:
entry_elems = entries_elem.getElementsByTagName('Entry')
for entry_elem in entry_elems:
entry = GsAccessControlList.Entry()
entry.permission = self.value_from_elems(
entry_elem.getElementsByTagName('Permission'))
scope_elems = entry_elem.getElementsByTagName('Scope')
for scope_elem in scope_elems:
entry.scope_type = scope_elem.getAttribute('type')
entry.scope_user_id = self.value_from_elems(
scope_elem.getElementsByTagName('ID'))
entry.scope_user_name = self.value_from_elems(
scope_elem.getElementsByTagName('Name'))
entry.scope_email = self.value_from_elems(
scope_elem.getElementsByTagName('EmailAddress'))
entry.scope_domain = self.value_from_elems(
scope_elem.getElementsByTagName('Domain'))
self.entries.append(entry)
def to_xml(self, pretty=False):
"""Translates this acl object to XML string.
Args:
pretty: if True, output will use dom.toprettyxml
Returns:
AccessControlList XML as string
"""
impl = xml.dom.minidom.getDOMImplementation()
dom = impl.createDocument(None, 'AccessControlList', None)
top_elem = dom.documentElement
if self.owner_id or self.owner_name:
owner_elem = dom.createElement('Owner')
if self.owner_id:
self.add_text_node(dom, owner_elem, 'ID', self.owner_id)
if self.owner_name:
self.add_text_node(dom, owner_elem, 'Name', self.owner_name)
top_elem.appendChild(owner_elem)
if self.entries:
entries_elem = dom.createElement('Entries')
for entry in self.entries:
entry_elem = dom.createElement('Entry')
if entry.permission:
self.add_text_node(dom,
entry_elem,
'Permission',
entry.permission)
if (entry.scope_type or
entry.scope_user_id or
entry.scope_user_name or
entry.scope_email or
entry.scope_domain):
scope_elem = dom.createElement('Scope')
if entry.scope_type:
scope_elem.setAttribute('type', entry.scope_type)
if entry.scope_user_id:
self.add_text_node(dom,
scope_elem,
'ID',
entry.scope_user_id)
if entry.scope_user_name:
self.add_text_node(dom,
scope_elem,
'Name',
entry.scope_user_name)
if entry.scope_email:
self.add_text_node(dom,
scope_elem,
'EmailAddress',
entry.scope_email)
if entry.scope_domain:
self.add_text_node(dom,
scope_elem,
'Domain',
entry.scope_domain)
entry_elem.appendChild(scope_elem)
entries_elem.appendChild(entry_elem)
top_elem.appendChild(entries_elem)
if pretty:
return dom.toprettyxml(indent=' ')
return dom.toxml()
class GsListAllMyBucketsResult(GsXmlBase):
"""ListAllMyBucketsResult XML parser.
See the Google Storage API documentation for more information about the
ListAllMyBucketsResult XML specification.
Instance data:
owner_id: owner google storage id as string
owner_display_name: owner name as string
bucket_list: list of GsListAllMyBucketsResult.Bucket instances
"""
class Bucket(object):
"""Bucket class corresponding to like named element.
Instance data:
name: bucket name as string
creation_date: bucket creation date as string
"""
def __init__(self):
"""Bucket constructor."""
self.name = ''
self.creation_date = ''
def __init__(self):
"""GsListAllMyBucketsResult constructor."""
self.owner_id = ''
self.owner_display_name = ''
self.bucket_list = []
def parse_xml(self, xml_str):
"""Parses the given xml string to this object.
Args:
xml_str: ListAllMyBucketsResult XML as string
"""
self.owner_id = ''
self.owner_display_name = ''
self.bucket_list = []
dom = xml.dom.minidom.parseString(xml_str)
owner_elems = dom.getElementsByTagName('Owner')
for owner_elem in owner_elems:
self.owner_id = self.value_from_elems(
owner_elem.getElementsByTagName('ID'))
self.owner_display_name = self.value_from_elems(
owner_elem.getElementsByTagName('DisplayName'))
buckets_elems = dom.getElementsByTagName('Buckets')
for buckets_elem in buckets_elems:
bucket_elems = buckets_elem.getElementsByTagName('Bucket')
for bucket_elem in bucket_elems:
bucket = GsListAllMyBucketsResult.Bucket()
bucket.name = self.value_from_elems(
bucket_elem.getElementsByTagName('Name'))
bucket.creation_date = self.value_from_elems(
bucket_elem.getElementsByTagName('CreationDate'))
self.bucket_list.append(bucket)
def get_bucket_names(self):
"""Returns the list of bucket names from self.bucket_list."""
return [b.name for b in self.bucket_list]
class GsListBucketResult(GsXmlBase):
"""ListBucketResult XML parser.
See the Google Storage API documentation for more information about the
ListBucketResult XML specification.
Instance data:
name: bucket name as string
prefix: prefix specified in request as string
marker: marker specified in request as string
is_truncated: "true" if all objects in bucket were returned
contents_list: list of GsListBucketResult.Contents instances
common_prefixes: list of <CommonPrefixes>.<Prefix> names as strings
"""
class Contents(object):
"""Contents class corresponding to like named element.
Instance data:
key: object name as string
last_modified: time object last modified as string
etag: object data etag value as string
size: object size as string
storage_class: object storage class as string
owner_id: object owner google storage id as string
owner_display_name: object owner name as string
"""
def __init__(self):
"""Contents constructor."""
self.key = ''
self.last_modified = ''
self.etag = ''
self.size = ''
self.storage_class = ''
self.owner_id = ''
self.owner_display_name = ''
def __init__(self):
"""GsListBucketResult constructor."""
self.name = ''
self.prefix = ''
self.marker = ''
self.is_truncated = ''
self.contents_list = []
self.common_prefixes = []
def parse_xml(self, xml_str):
"""Parses the given xml string to this object.
Args:
xml_str: ListBucketResult XML as string
"""
self.contents_list = []
self.common_prefixes = []
dom = xml.dom.minidom.parseString(xml_str)
self.name = self.value_from_elems(dom.getElementsByTagName('Name'))
self.prefix = self.value_from_elems(dom.getElementsByTagName('Prefix'))
self.marker = self.value_from_elems(dom.getElementsByTagName('Marker'))
self.is_truncated = self.value_from_elems(
dom.getElementsByTagName('IsTruncated'))
contents_elems = dom.getElementsByTagName('Contents')
for contents_elem in contents_elems:
contents = GsListBucketResult.Contents()
contents.key = self.value_from_elems(
contents_elem.getElementsByTagName('Key'))
contents.last_modified = self.value_from_elems(
contents_elem.getElementsByTagName('LastModified'))
contents.etag = self.value_from_elems(
contents_elem.getElementsByTagName('ETag'))
contents.size = self.value_from_elems(
contents_elem.getElementsByTagName('Size'))
contents.storage_class = self.value_from_elems(
contents_elem.getElementsByTagName('StorageClass'))
owner_elems = contents_elem.getElementsByTagName('Owner')
for owner_elem in owner_elems:
contents.owner_id = self.value_from_elems(
owner_elem.getElementsByTagName('ID'))
contents.owner_display_name = self.value_from_elems(
owner_elem.getElementsByTagName('DisplayName'))
self.contents_list.append(contents)
common_prefixes_elems = dom.getElementsByTagName('CommonPrefixes')
for common_prefixes_elem in common_prefixes_elems:
prefix_elems = common_prefixes_elem.getElementsByTagName('Prefix')
for prefix_elem in prefix_elems:
self.common_prefixes.append(prefix_elem.childNodes[0].nodeValue)
def get_keys(self):
"""Returns the list of object names found in self.contents_list."""
return [c.key for c in self.contents_list]
class GsOperation(object):
"""Class to hold the important details of an HTTP request and response.
Instance data:
connection_host: host name connected to as string
connection_port: host port connected to as int
request_method: http request method ('GET', 'PUT', etc...) as string
request_path_and_query: request URL path and query as string
request_headers: request headers as dict
response_status: response http status as int
response_headers: response headers as dict
response_error_body: response error body as string
"""
def __init__(self):
"""GsOperation constructor."""
self.connection_host = ''
self.connection_port = 80
self.request_method = ''
self.request_path_and_query = ''
self.request_headers = None
self.response_status = 0
self.response_headers = None
self.response_error_body = None
def __str__(self):
"""Convert instance to loggable string."""
s = StringIO.StringIO()
s.write('REQUEST:')
s.write('\nSent to host: %s:%d' % (self.connection_host,
self.connection_port))
s.write('\n%s %s' % (self.request_method, self.request_path_and_query))
if self.request_headers:
for k, v in self.request_headers.iteritems():
s.write('\n%s: %s' % (k, v))
s.write('\nRESPONSE:')
s.write('\n%d' % self.response_status)
if self.response_headers:
for k, v in self.response_headers.iteritems():
s.write('\n%s: %s' % (k, v))
if self.response_error_body:
s.write('\n')
s.write(self.response_error_body)
return s.getvalue()
class GsClient(object):
"""Google Storage client.
Instance data:
access_key: google storage access key as string for authentication
secret: google storage secret key as string for authentication
host: google storage host as string
proxy_host: optional proxy host
proxy_port: optional proxy port
auth_id: authentication type as string
max_retries: max num retries for retryable errors
max_redirects: max num redirects to follow
operations: list of GsOperation instances for most recent request
Note that each retry or redirection will append to this list.
backoff_exponent: current backoff exponent during failures
"""
def __init__(self,
access_key=None,
secret=None,
host='commondatastorage.googleapis.com',
proxy_host=None,
proxy_port=80,
auth_id='GOOG1',
max_retries=5,
max_redirects=10):
"""GsClient constructor.
Args:
access_key: google storage access key as string for authentication
secret: google storage secret key as string for authentication
host: google storage host as string
proxy_host: optional proxy host
proxy_port: optional proxy port
auth_id: authentication type as string
max_retries: max num retries for retryable errors
max_redirects: max num redirects to follow
"""
self.access_key = access_key
self.secret = secret
self.host = host
self.proxy_host = proxy_host
self.proxy_port = proxy_port
self.auth_id = auth_id
self.max_retries = max_retries
self.max_redirects = max_redirects
self.operations = []
self.backoff_exponent = -1
def get_service(self):
"""GET Service.
Returns:
GsListAllMyBucketsResult instance
"""
outfile = StringIO.StringIO()
self.send_request('GET', outfile=outfile)
result = GsListAllMyBucketsResult()
result.parse_xml(outfile.getvalue())
return result
def get_bucket(self,
bucket,
query_parameters=None):
"""GET Bucket.
Args:
bucket: bucket name as string
query_parameters: query parameters as dict
Returns:
GsListBucketResult instance
"""
outfile = StringIO.StringIO()
self.send_request('GET',
bucket=bucket,
outfile=outfile,
query_parameters=query_parameters)
result = GsListBucketResult()
result.parse_xml(outfile.getvalue())
return result
def get_bucket_acl(self,
bucket):
"""GET Bucket ACL.
Args:
bucket: bucket name as string
Returns:
GsAccessControlList instance
"""
outfile = StringIO.StringIO()
self.send_request('GET',
bucket=bucket,
outfile=outfile,
query_parameters={'acl': None})
acl = GsAccessControlList()
acl.parse_xml(outfile.getvalue())
return acl
def get_object(self,
bucket,
key,
outfile,
extra_headers=None,
query_parameters=None,
chunk_size=0):
"""GET Object.
Args:
bucket: bucket name as string
key: object name as string
outfile: an open file-like object
Only success responses will be written to this file.
Error resonses will be found in the operation objects
extra_headers: optional request headers as dict
query_parameters: optional query parameters as dict
chunk_size: size of each socket read (default of 0 = read all)
"""
self.send_request('GET',
bucket=bucket,
key=key,
outfile=outfile,
extra_headers=extra_headers,
query_parameters=query_parameters,
chunk_size=chunk_size,
success_status_codes=GET_OBJECT_SUCCESS_CODES)
def get_object_acl(self,
bucket,
key):
"""GET Object ACL.
Args:
bucket: bucket name as string
key: object name as string
Returns:
GsAccessControlList instance
"""
outfile = StringIO.StringIO()
self.send_request('GET',
bucket=bucket,
key=key,
outfile=outfile,
query_parameters={'acl': None})
acl = GsAccessControlList()
acl.parse_xml(outfile.getvalue())
return acl
def head_object(self,
bucket,
key,
extra_headers=None):
"""HEAD Object.
Args:
bucket: bucket name as string
key: object name as string
extra_headers: optional request headers as dict
Returns:
response headers as dict
"""
self.send_request('HEAD',
bucket=bucket,
key=key,
extra_headers=extra_headers)
return self.operations[-1].response_headers
def put_bucket(self,
bucket,
infile=None,
extra_headers=None,
query_parameters=None):
"""PUT Bucket.
Args:
bucket: bucket name as string
infile: an open file-like object
data in this file will be written to the http socket
extra_headers: optional request headers as dict
query_parameters: optional query parameters as dict
"""
self.send_request('PUT',
bucket=bucket,
infile=infile,
extra_headers=extra_headers,
query_parameters=query_parameters)
def put_bucket_acl(self,
bucket,
acl):
"""PUT Bucket ACL.
Args:
bucket: bucket name as string
acl: GsAccessControlList instance
"""
infile = StringIO.StringIO(acl.to_xml())
self.put_bucket(bucket,
infile=infile,
query_parameters={'acl': None})
def put_object(self,
bucket,
key,
infile,
extra_headers=None,
query_parameters=None,
chunk_size=0):
"""PUT Object.
Args:
bucket: bucket name as string
key: object name as string
infile: an open file-like object
data in this file will be written to the http socket
extra_headers: optional request headers as dict
query_parameters: optional query parameters as dict
chunk_size: size of each socket write (default of 0 = write all)
"""
self.send_request('PUT',
bucket=bucket,
key=key,
infile=infile,
extra_headers=extra_headers,
query_parameters=query_parameters,
chunk_size=chunk_size)
def put_object_acl(self,
                   bucket,
                   key,
                   acl):
  """PUT Object ACL.

  Serializes the ACL to XML and uploads it to the object's ?acl
  subresource.

  Args:
    bucket: bucket name as string
    key: object name as string
    acl: GsAccessControlList instance
  """
  acl_body = StringIO.StringIO(acl.to_xml())
  self.put_object(bucket, key, acl_body,
                  query_parameters={'acl': None})
def delete_bucket(self,
                  bucket):
  """DELETE Bucket.

  Args:
    bucket: bucket name as string
  """
  self.send_request('DELETE', bucket=bucket,
                    success_status_codes=DEL_BUCKET_SUCCESS_CODES,
                    retryable_status_codes=DEL_BUCKET_RETRYABLE_CODES)
def delete_object(self,
                  bucket,
                  key):
  """DELETE Object.

  Args:
    bucket: bucket name as string
    key: object name as string
  """
  self.send_request('DELETE', bucket=bucket, key=key,
                    success_status_codes=DEL_OBJECT_SUCCESS_CODES)
def send_request(self,
                 http_method,
                 bucket=None,
                 key=None,
                 infile=None,
                 outfile=None,
                 extra_headers=None,
                 query_parameters=None,
                 chunk_size=0,
                 success_status_codes=DEFAULT_SUCCESS_CODES,
                 retryable_status_codes=DEFAULT_RETRYABLE_CODES):
  """Sends the specified request.

  Retries and follows redirection as necessary.  Each attempt is
  recorded as a GsOperation in self.operations; the exponential
  backoff exponent is raised on failures and lowered on successes.

  Args:
    http_method: http method as string ('GET', 'PUT', etc...)
    bucket: bucket name as string
    key: object name as string
    infile: an open file-like object
        data in this file will be written to the http socket
    outfile: an open file-like object
        Only success responses will be written to this file.
        Error responses will be found in the operation objects
    extra_headers: optional request headers as dict
    query_parameters: optional query parameters as dict
    chunk_size: size of each socket read/write (default of 0 = all)
    success_status_codes: response status codes considered success
    retryable_status_codes: response status codes considered retryable

  Returns:
    self.operations: the list of operations executed for this request.

  Raises:
    GsError: when retries/redirects are exhausted or a non-retryable
        failure status is returned.
  """
  self.operations = []
  operation = None
  redirect_location = None
  retries = 0
  redirects = 0
  while retries <= self.max_retries and redirects <= self.max_redirects:
    # Need backoff sleep?  Exponent < 0 means backoff is currently off.
    if self.backoff_exponent >= 0:
      self._backoff_sleep()
    # Prepare operation: either follow the pending redirect or build a
    # fresh request from the caller's arguments.
    if redirect_location:
      operation = self._create_redirect_operation(
          operation, redirect_location)
      redirect_location = None
    else:
      operation = self._create_init_operation(
          http_method,
          bucket=bucket,
          key=key,
          extra_headers=extra_headers,
          query_parameters=query_parameters,
          infile=infile)
    # Execute operation
    try:
      operation = self._exec_operation(
          operation,
          infile=infile,
          outfile=outfile,
          chunk_size=chunk_size,
          success_status_codes=success_status_codes)
    except httplib.IncompleteRead, e:
      # Treat a truncated response body as a retryable failure.
      operation.response_error_body = (
          'IncompleteRead: %d bytes read' % (e.partial))
      retries += 1
      self._backoff_increment()
      continue
    finally:
      # Record the attempt whether it succeeded or not.
      self.operations.append(operation)
    # Check for success
    if operation.response_status in success_status_codes:
      self._backoff_decrement()
      return self.operations
    # Check for redirect
    elif operation.response_status in REDIRECT_CODES:
      self._backoff_decrement()
      redirect_location = operation.response_headers['location']
      redirects += 1
      logging.debug('Redirected to %s', redirect_location)
      continue
    # Check for retryable failures
    elif operation.response_status in retryable_status_codes:
      self._backoff_increment()
      retries += 1
      continue
    else:
      # Non-retryable failure: give up immediately.
      self._backoff_increment()
      break
  raise GsError('Service Failure', self.operations)
def _exec_operation(self,
                    operation,
                    infile=None,
                    outfile=None,
                    chunk_size=0,
                    success_status_codes=DEFAULT_SUCCESS_CODES):
  """Executes given operation request, and populates response.

  Performs one full HTTP round trip: connect, send the request line,
  headers and body, then read the status, headers and body back into
  the operation object.  The connection is always closed, even when an
  exception escapes.

  Args:
    operation: GsOperation with the request fields already prepared.
    infile: optional open file-like object holding the request body.
    outfile: optional open file-like object; success response bodies
        are written here.
    chunk_size: size of each socket read/write (0 = all at once).
    success_status_codes: statuses whose body is written to outfile;
        other bodies go to operation.response_error_body.

  Returns:
    The same operation, with its response_* fields populated.
  """
  connection = None
  try:
    logging.debug('%s %s %s',
                  operation.request_method,
                  operation.request_path_and_query,
                  str(operation.request_headers))
    # Connect
    connection = self._connect(operation.connection_host,
                               operation.connection_port)
    # Write the first line of the request
    self._put_request(connection,
                      operation.request_method,
                      operation.request_path_and_query)
    # Write the headers
    self._put_headers(connection,
                      operation.request_headers)
    # Write the data
    if infile:
      self._write(connection, infile, chunk_size)
    else:
      # Flush the header write with no body
      connection.send('')
    # Get the response
    response = connection.getresponse()
    # Get the status
    operation.response_status = response.status
    # Read the response headers
    operation.response_headers = {}
    operation.response_headers.update(response.getheaders())
    # Read the response data (not for HEAD)
    if operation.request_method != 'HEAD':
      # Don't put data in outfile unless success status
      if operation.response_status in success_status_codes:
        if outfile:
          self._read(response, outfile, chunk_size)
      # Read the error body
      else:
        operation.response_error_body = response.read()
  finally:
    if connection:
      self._close(connection)
  return operation
def _create_init_operation(self,
                           http_method,
                           bucket=None,
                           key=None,
                           extra_headers=None,
                           query_parameters=None,
                           infile=None):
  """Inits a new operation with request fields.

  Resolves the connection endpoint (proxy if configured, otherwise the
  service host), builds the path+query, and computes the full request
  header set, including authentication.

  Returns:
    A GsOperation ready to be executed.
  """
  op = GsOperation()
  if self.proxy_host:
    op.connection_host = self.proxy_host
    op.connection_port = self.proxy_port
  else:
    op.connection_host = self.host
    op.connection_port = 80
  op.request_method = http_method
  path = self._get_path(bucket, key)
  query_string = self._get_query_string(query_parameters)
  op.request_path_and_query = path + query_string
  op.request_headers = self._get_request_headers(
      http_method,
      path,
      query_parameters,
      extra_headers,
      infile)
  return op
def _create_redirect_operation(self,
                               previous_operation,
                               location):
  """Creates a new op based on the last op and the redirection.

  Re-targets the previous request at the host/port named in the
  redirect Location, keeping the original method and headers.
  """
  parts = urlparse.urlparse(location)
  op = GsOperation()
  if self.proxy_host:
    # Keep talking through the proxy; the new target is conveyed via
    # the Host header and request path below.
    op.connection_host = self.proxy_host
    op.connection_port = self.proxy_port
  else:
    # netloc may be "host" or "host:port".
    host_and_port = parts.netloc.split(':')
    op.connection_host = host_and_port[0]
    if len(host_and_port) > 1:
      op.connection_port = int(host_and_port[1])
    else:
      op.connection_port = 80
  op.request_method = previous_operation.request_method
  op.request_path_and_query = parts.path
  if parts.query:
    op.request_path_and_query += '?%s' % parts.query
  # Reuse the previous headers (including the signature) unchanged
  # except for Host, which must match the redirect target.
  op.request_headers = previous_operation.request_headers.copy()
  op.request_headers['Host'] = parts.netloc  # host and optional port
  return op
def _backoff_decrement(self):
"""Decrements the backoff exponent toward min of -1 (off)."""
if self.backoff_exponent > -1:
self.backoff_exponent -= 1
def _backoff_increment(self):
"""Increments the backoff exponent toward max of 5."""
if self.backoff_exponent < 5:
self.backoff_exponent += 1
def _backoff_sleep(self):
  """Backoff sleep function called between retry attempts.

  See Google Storage docs for required exponential backoff
  when errors occur.

  Override this if you want it to do more.
  """
  # Sleep 2**exponent seconds; the exponent is raised/lowered by the
  # retry loop in send_request.
  sleep_sec = 1 << self.backoff_exponent
  logging.debug('Backoff sleep, retrying in %d seconds...', sleep_sec)
  time.sleep(sleep_sec)
def _connect(self, host, port):
  """Returns a connection object.

  Override this if you have an alternate connection implementation
  (e.g. one that returns an HTTPS connection instead).
  """
  return httplib.HTTPConnection(host, port=port)
def _close(self, connection):
"""Closes the connection.
Override this if you want it to do more.
"""
connection.close()
def _put_request(self,
                 connection,
                 http_method,
                 path_and_query):
  """Sends the method, path, and query to the connection.

  Override this if you want it to do more.
  """
  # skip_host / skip_accept_encoding: the Host header is supplied
  # explicitly by _get_request_headers, and no default Accept-Encoding
  # header should be injected by httplib.
  connection.putrequest(http_method,
                        path_and_query,
                        skip_host=True,
                        skip_accept_encoding=True)
def _put_headers(self,
                 connection,
                 headers):
  """Sends the request headers to the connection.

  Override this if you want it to do more.

  Args:
    connection: open HTTP connection (after the request line is sent).
    headers: dict of header name -> value.
  """
  for name, val in headers.iteritems():
    connection.putheader(name, val)
  # Terminates the header section of the request.
  connection.endheaders()
def _write(self, connection, infile, chunk_size):
"""Writes data in infile to the open connection.
Override this if you want it to do more.
Perhaps for performance measuring or periodic callbacks.
"""
infile.seek(0)
if chunk_size > 0:
while True:
chunk = infile.read(chunk_size)
if chunk:
connection.send(chunk)
else:
break
else:
connection.send(infile.read())
def _read(self, response, outfile, chunk_size):
"""Reads data from response, and writes it to outfile.
Override this if you want it to do more.
Perhaps for performance measuring or periodic callbacks.
"""
if chunk_size > 0:
while True:
chunk = response.read(chunk_size)
if chunk:
outfile.write(chunk)
else:
break
else:
outfile.write(response.read())
outfile.flush()
def _get_request_headers(self,
                         http_method,
                         path,
                         query_parameters,
                         extra_headers,
                         infile):
  """Returns the request header dict based on args.

  Builds Content-Length, Date, Host and User-Agent, merges in any
  caller-supplied headers, then signs the request when credentials are
  configured.
  """
  headers = {}
  # Content-Length
  if infile:
    # Size the body by seeking to its end; _write() rewinds to 0 later.
    infile.seek(0, os.SEEK_END)
    headers['Content-Length'] = infile.tell()
  else:
    headers['Content-Length'] = '0'
  # Date (RFC 1123 format, always GMT)
  headers['Date'] = time.strftime('%a, %d %b %Y %H:%M:%S GMT',
                                  time.gmtime())
  # Host
  headers['Host'] = self.host
  # User-Agent
  headers['User-Agent'] = 'gslite/' + __version__
  # Add extra headers (these may override the defaults above)
  if extra_headers:
    headers.update(extra_headers)
  # Authorization — computed last so the signature covers the final
  # header set.
  if self.access_key and self.secret:
    headers['Authorization'] = self._get_authentication(
        http_method,
        path,
        query_parameters,
        headers)
  return headers
def _get_path(self, bucket, key):
  """Returns the URL path based on args.

  Produces '/', '/<bucket>' or '/<bucket>/<key>', percent-encoding
  both components.
  """
  path = '/'
  if bucket:
    path += urllib.quote(bucket)
  if key:
    path += '/' + urllib.quote(key)
  return path
def _get_query_string(self, query_parameters):
  """Returns the URL query string based on query dict.

  Parameters with a falsy value are rendered as a bare name (used for
  subresources like '?acl'); others as name=<quoted value>.  Returns
  '' when there are no parameters.
  """
  if not query_parameters:
    return ''
  parts = []
  for name, val in query_parameters.iteritems():
    if val:
      parts.append('%s=%s' % (name, urllib.quote(str(val))))
    else:
      parts.append(name)
  return '?' + '&'.join(parts)
def _get_authentication(self, http_method, path, query_parameters, headers):
  """Returns the Authorization header value based on args.

  Builds the canonical string-to-sign (method, Content-MD5,
  Content-Type, Date, sorted x-goog-* extension headers, then the
  resource), signs it with HMAC-SHA1 using the secret key, and
  base64-encodes the digest.
  """
  string_to_sign = StringIO.StringIO()
  # HTTP method
  string_to_sign.write('%s\n' % http_method)
  # Content-Md5 (the trailing newline is written even when absent)
  if 'Content-MD5' in headers:
    string_to_sign.write(headers['Content-MD5'].strip())
  string_to_sign.write('\n')
  # Content-Type
  if 'Content-Type' in headers:
    string_to_sign.write(headers['Content-Type'].strip())
  string_to_sign.write('\n')
  # Date — skipped when an x-goog-date extension header supplies it.
  if ('x-goog-date' not in headers and
      'Date' in headers):
    string_to_sign.write(headers['Date'])
  string_to_sign.write('\n')
  # Extension headers, in sorted order as required by the signature.
  sorted_header_keys = headers.keys()
  sorted_header_keys.sort()
  for header_key in sorted_header_keys:
    if header_key.startswith('x-goog-'):
      string_to_sign.write('%s:%s\n' % (
          header_key, headers[header_key]))
  # Resource (path plus at most one signed subresource)
  string_to_sign.write(path)
  if query_parameters:
    for subresource in ('acl', 'location', 'logging', 'torrent'):
      if subresource in query_parameters:
        string_to_sign.write('?%s' % subresource)
        # should only be one of these
        break
  # HMAC-SHA1
  h = hmac.new(self.secret, digestmod=hashlib.sha1)
  h.update(string_to_sign.getvalue())
  signature = base64.b64encode(h.digest())
  # Put it all together
  return '%s %s:%s' % (self.auth_id, self.access_key, signature)
| [
[
8,
0,
0.037,
0.0449,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
14,
0,
0.0606,
0.0008,
0,
0.66,
0.0385,
162,
1,
0,
0,
0,
0,
3,
0
],
[
1,
0,
0.0623,
0.0008,
0,
0.66,
... | [
"\"\"\"Simple, extendable, mockable Python client for Google Storage.\n\nThis module only depends on standard Python libraries. It is intended to provide\na set of base client classes with all critical features implemented. Advanced\nfeatures can be added by extending the classes. Or, it can be used as-is.\n\nInsta... |
#!/usr/bin/env python
#
# Copyright 2011 Google Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""User settings page for the application."""
import os
import re
from apiclient.discovery import build
from oauth2client import client
from google.appengine.api import users
from google.appengine.ext import webapp
from google.appengine.ext.webapp import template
from google.appengine.ext.webapp.util import login_required
import httplib2
import model
import settings
class UserSettingsPage(webapp.RequestHandler):
  """RequestHandler for the Admin page."""

  @login_required
  def get(self):
    """Display the admin page template.

    Collects the current user, admin status, whether the app is running
    locally, the stored suggestion models and — when Prediction API
    credentials are available — each model's training status.
    """
    template_value = {}
    user = users.get_current_user()
    template_value['current_user'] = user.email()
    if users.is_current_user_admin():
      # Show which account's credentials are on file (admins only).
      credentials = model.Credentials.get_by_key_name(
          settings.CREDENTIALS_KEYNAME)
      if credentials:
        template_value['credentials_email'] = credentials.email
      template_value['is_admin'] = True
    else:
      template_value['is_admin'] = False
    # Determine whether or not the server is running locally, and offer a
    # datastore reset if it's not.
    if re.search('(appspot)', self.request.host):
      template_value['is_local'] = False
    else:
      template_value['is_local'] = True
    # Make a list of tags from the datastore to pass to template.
    suggestion_models = model.SuggestionModel.all()
    suggestion_models.order('__key__')
    template_value['models'] = suggestion_models
    credentials = model.Credentials.get_by_key_name(
        settings.CREDENTIALS_KEYNAME)
    status = {}
    if credentials is not None:
      # Use the stored OAuth credentials to query the Prediction API
      # for the training status of every model.
      credentials = credentials.credentials
      http = httplib2.Http()
      http = credentials.authorize(http)
      service = build('prediction', 'v1.4', http=http)
      try:
        train = service.trainedmodels()
        for suggestion_model in suggestion_models:
          state = train.get(id=suggestion_model.training_file).execute()
          status[suggestion_model.name] = state['trainingStatus']
      except client.AccessTokenRefreshError:
        # Token expired/revoked: surface it in the status table.
        status['Failed to retrieve training data'] = 'Refresh credentials'
    else:
      status['Add Credentials to access models'] = '...'
    template_value['status'] = status
    path = os.path.join(settings.TEMPLATE_BASE_PATH, 'user_settings.html')
    self.response.out.write(template.render(path, template_value))
| [
[
8,
0,
0.1954,
0.0115,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.2414,
0.0115,
0,
0.66,
0.0833,
688,
0,
1,
0,
0,
688,
0,
0
],
[
1,
0,
0.2529,
0.0115,
0,
0.66... | [
"\"\"\"User settings page for the application.\"\"\"",
"import os",
"import re",
"from apiclient.discovery import build",
"from oauth2client import client",
"from google.appengine.api import users",
"from google.appengine.ext import webapp",
"from google.appengine.ext.webapp import template",
"from ... |
#!/usr/bin/env python
#
# Copyright 2011 Google Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Authorization grant page for the application."""
import logging
import os
import pickle
from oauth2client.appengine import StorageByKeyName
from oauth2client.client import OAuth2WebServerFlow
from google.appengine.api import memcache
from google.appengine.api import users
from google.appengine.ext import webapp
from google.appengine.ext.webapp import template
from google.appengine.ext.webapp.util import login_required
from google.appengine.ext.webapp.util import run_wsgi_app
import model
import settings
"""
Maps OAuth API parameter with API scope.
The current supported values are:
{
'<api_name>': {
'admin_required': Whether or not this API is "admin-only".
'scopes': The requested Google API Scopes.
'model': Datastore model used to store the credentials.
'credentials_attribute': Datastore model attribute used to store the
credentials.
'key_name': Key name to use if only one instance of this model has to be
stored at a time. Optional, this value default to the current
user ID.
"""
# Per-API OAuth configuration; keys are the <api> path component handled
# by OAuthGrantPage.
SCOPES = {
    'prediction': {
        # Admin-only; one shared credential stored under a fixed key name.
        'admin_required': True,
        'scopes': ['https://www.googleapis.com/auth/prediction'],
        'model': model.Credentials,
        'credentials_attribute': 'credentials',
        'key_name': settings.CREDENTIALS_KEYNAME
    },
    'tasks': {
        # Any signed-in user; credentials stored per user (no key_name,
        # so the key falls back to the user's email).
        'admin_required': False,
        'scopes': ['https://www.googleapis.com/auth/tasks'],
        'model': model.UserSettings,
        'credentials_attribute': 'tasks_credentials'
    }
}
class OAuthGrantPage(webapp.RequestHandler):
  """RequestHandler for the authorization grant page."""

  @login_required
  def get(self, api):
    """Handle the GET request for the OAuth grant page.

    Construct the authorization grant URL and redirect the user to it.

    Args:
      api: Private API name to ask access for (should be a key of SCOPES).
    """
    if (api not in SCOPES or
        SCOPES[api]['admin_required'] and not users.is_current_user_admin()):
      # Bug fix: webapp.RequestHandler has no status() method; error()
      # is the supported way to set an error status on the response.
      self.error(400)
    else:
      user = users.get_current_user()
      logging.info('%s (%s) has entered OAuth 2.0 grant flow',
                   user.email(), user.user_id())
      # 'state' carries the api name through the round trip so the
      # callback handler knows which configuration to use.
      flow = OAuth2WebServerFlow(client_id=settings.CLIENT_ID,
                                 client_secret=settings.CLIENT_SECRET,
                                 scope=' '.join(SCOPES[api]['scopes']),
                                 user_agent=settings.USER_AGENT,
                                 domain=settings.DOMAIN,
                                 state=api, access_type='offline')
      callback = self.request.host_url + '/oauth2callback'
      authorize_url = flow.step1_get_authorize_url(callback)
      # Stash the flow so the callback page can finish the exchange.
      memcache.set(user.user_id() + api, pickle.dumps(flow))
      self.redirect(authorize_url)
class OAuthCallbackPage(webapp.RequestHandler):
  """RequestHandler for the authorization callback page."""

  @login_required
  def get(self):
    """Handle the GET request for the OAuth callback page.

    Get the stored user's credentials flow and request the access token to
    finish the OAuth 2.0 dance.

    If successful, the user's OAuth 2.0 credentials are stored in the
    datastore.
    """
    user = users.get_current_user()
    error = self.request.get('error')
    api = self.request.params.get('state')
    if (api not in SCOPES or
        SCOPES[api]['admin_required'] and not users.is_current_user_admin()):
      # Bug fix: webapp.RequestHandler has no status() method; error()
      # is the supported way to set an error status on the response.
      self.error(404)
    elif error and error == 'access_denied':
      logging.warning('%s (%s) has denied access to the APIs',
                      user.email(), user.user_id())
    else:
      pickled_flow = memcache.get(user.user_id() + api)
      if pickled_flow:
        # Resume the flow stashed by OAuthGrantPage and swap the
        # authorization code for credentials.
        flow = pickle.loads(pickled_flow)
        credentials = flow.step2_exchange(self.request.params)
        StorageByKeyName(
            SCOPES[api]['model'], SCOPES[api].get('key_name') or user.email(),
            SCOPES[api]['credentials_attribute']).put(credentials)
        if SCOPES[api].get('key_name'):
          # Add the email to the datastore Credentials entry.
          credentials = model.Credentials.get_by_key_name(
              settings.CREDENTIALS_KEYNAME)
          credentials.email = user.email()
          credentials.put()
        logging.info('Successfully stored OAuth 2.0 credentials for: %s (%s)',
                     user.email(), user.user_id())
      else:
        logging.warning('Unknown flow for user: %s (%s)',
                        user.email(), user.user_id())
      self.redirect('/')
    # NOTE(review): the original indentation of this tail was ambiguous.
    # Rendering after a redirect is harmless (webapp ignores the body),
    # and the error/denied paths fall through to show the oauth page —
    # confirm against the upstream sample.
    path = os.path.join(settings.TEMPLATE_BASE_PATH, 'oauth.html')
    self.response.out.write(template.render(path, {}))
# URL routing for the OAuth grant and callback pages.
application = webapp.WSGIApplication(
    [
        ('/oauth/(.*)', OAuthGrantPage),
        ('/oauth2callback', OAuthCallbackPage),
    ],
    debug=True)


def main():
  """Runs the application."""
  run_wsgi_app(application)


if __name__ == '__main__':
  main()
| [
[
8,
0,
0.1083,
0.0064,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.1338,
0.0064,
0,
0.66,
0.05,
715,
0,
1,
0,
0,
715,
0,
0
],
[
1,
0,
0.1401,
0.0064,
0,
0.66,
... | [
"\"\"\"Authorization grant page for the application.\"\"\"",
"import logging",
"import os",
"import pickle",
"from oauth2client.appengine import StorageByKeyName",
"from oauth2client.client import OAuth2WebServerFlow",
"from google.appengine.api import memcache",
"from google.appengine.api import user... |
#!/usr/bin/env python
#
# Copyright 2011 Google Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Landing page for the application."""
import os
from google.appengine.api import users
from google.appengine.ext import webapp
from google.appengine.ext.webapp import template
from api.handler import INCIDENT_FILTERS
from settings import TEMPLATE_BASE_PATH
class LandingPage(webapp.RequestHandler):
  """Landing page handler."""

  def get(self):
    """Render the landing page.

    Signed-in users get the main page template; anonymous visitors are
    redirected to the login page (and returned here afterwards).
    """
    user = users.get_current_user()
    if user:
      template_values = {
          'owner': user.email(),
          # Retrieve the list of filters to add as autocomplete params.
          'filters': [x[0] for x in INCIDENT_FILTERS],
      }
      path = os.path.join(TEMPLATE_BASE_PATH, 'page.html')
      self.response.out.write(template.render(path, template_values))
    else:
      self.redirect(users.create_login_url(self.request.uri))
| [
[
8,
0,
0.3864,
0.0227,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.4773,
0.0227,
0,
0.66,
0.1429,
688,
0,
1,
0,
0,
688,
0,
0
],
[
1,
0,
0.5,
0.0227,
0,
0.66,
... | [
"\"\"\"Landing page for the application.\"\"\"",
"import os",
"from google.appengine.api import users",
"from google.appengine.ext import webapp",
"from google.appengine.ext.webapp import template",
"from api.handler import INCIDENT_FILTERS",
"from settings import TEMPLATE_BASE_PATH",
"class LandingPa... |
#!/usr/bin/env python
"""ISO 8601 date time string parsing
Basic usage:
>>> import iso8601
>>> iso8601.parse_date("2007-01-25T12:00:00Z")
datetime.datetime(2007, 1, 25, 12, 0, tzinfo=<iso8601.iso8601.Utc ...>)
>>>
"""
from datetime import datetime, timedelta, tzinfo
import re
__all__ = ["parse_date", "ParseError"]
# Adapted from http://delete.me.uk/2005/03/iso8601.html
ISO8601_REGEX = re.compile(r"(?P<year>[0-9]{4})(-(?P<month>[0-9]{1,2})(-(?P<day>[0-9]{1,2})"
r"((?P<separator>.)(?P<hour>[0-9]{2}):(?P<minute>[0-9]{2})(:(?P<second>[0-9]{2})(\.(?P<fraction>[0-9]+))?)?"
r"(?P<timezone>Z|(([-+])([0-9]{2}):([0-9]{2})))?)?)?)?"
)
TIMEZONE_REGEX = re.compile("(?P<prefix>[+-])(?P<hours>[0-9]{2}).(?P<minutes>[0-9]{2})")
class ParseError(Exception):
    """Raised when there is a problem parsing a date string"""

# Yoinked from python docs
ZERO = timedelta(0)  # zero offset, shared by the tzinfo classes below
class Utc(tzinfo):
    """UTC

    Concrete tzinfo with a fixed zero offset and no DST.
    """
    def utcoffset(self, dt):
        return ZERO
    def tzname(self, dt):
        return "UTC"
    def dst(self, dt):
        return ZERO

# Module-level singleton used as the default timezone throughout.
UTC = Utc()
class FixedOffset(tzinfo):
    """Fixed offset in hours and minutes from UTC
    """
    def __init__(self, offset_hours, offset_minutes, name):
        self.__name = name
        self.__offset = timedelta(hours=offset_hours, minutes=offset_minutes)
    def utcoffset(self, dt):
        return self.__offset
    def dst(self, dt):
        return ZERO
    def tzname(self, dt):
        return self.__name
    def __repr__(self):
        return "<FixedOffset %r>" % self.__name
def parse_timezone(tzstring, default_timezone=UTC):
    """Parses ISO 8601 time zone specs into tzinfo offsets

    Args:
        tzstring: the timezone portion of the date ("Z", "+HH:MM" /
            "-HH:MM", or None when absent).
        default_timezone: tzinfo used for "Z" and missing timezones.

    Returns:
        A tzinfo instance.

    Raises:
        ParseError: if tzstring is not a recognized timezone spec.
    """
    if tzstring == "Z":
        return default_timezone
    # This isn't strictly correct, but it's common to encounter dates without
    # timezones so I'll assume the default (which defaults to UTC).
    # Addresses issue 4.
    if tzstring is None:
        return default_timezone
    m = TIMEZONE_REGEX.match(tzstring)
    if m is None:
        # Bug fix: an unmatched spec previously fell through to
        # m.groups() and crashed with AttributeError; report it as the
        # module's parse error instead.
        raise ParseError("Unable to parse time zone string %r" % tzstring)
    prefix, hours, minutes = m.groups()
    hours, minutes = int(hours), int(minutes)
    if prefix == "-":
        hours = -hours
        minutes = -minutes
    return FixedOffset(hours, minutes, tzstring)
def parse_date(datestring, default_timezone=UTC):
    """Parses ISO 8601 dates into datetime objects

    The timezone is parsed from the date string. However it is quite common to
    have dates without a timezone (not strictly correct). In this case the
    default timezone specified in default_timezone is used. This is UTC by
    default.

    Partial dates (e.g. "2007" or "2007-01-25"), which the regex accepts,
    are handled by defaulting missing month/day to 1 and missing time
    components to 0; previously they crashed with TypeError on int(None).

    Args:
        datestring: an ISO 8601 date string.
        default_timezone: tzinfo used when the string has no timezone.

    Returns:
        A timezone-aware datetime.

    Raises:
        ParseError: if datestring is not a string or cannot be parsed.
    """
    if not isinstance(datestring, basestring):
        raise ParseError("Expecting a string %r" % datestring)
    m = ISO8601_REGEX.match(datestring)
    if not m:
        raise ParseError("Unable to parse date string %r" % datestring)
    groups = m.groupdict()
    tz = parse_timezone(groups["timezone"], default_timezone=default_timezone)
    if groups["fraction"] is None:
        groups["fraction"] = 0
    else:
        # Scale the fractional-second digits to microseconds.
        groups["fraction"] = int(float("0.%s" % groups["fraction"]) * 1e6)
    return datetime(int(groups["year"]),
                    int(groups["month"] or 1),
                    int(groups["day"] or 1),
                    int(groups["hour"] or 0),
                    int(groups["minute"] or 0),
                    int(groups["second"] or 0),
                    int(groups["fraction"]), tz)
| [
[
8,
0,
0.0583,
0.0874,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.1165,
0.0097,
0,
0.66,
0.0833,
426,
0,
3,
0,
0,
426,
0,
0
],
[
1,
0,
0.1262,
0.0097,
0,
0.66... | [
"\"\"\"ISO 8601 date time string parsing\n\nBasic usage:\n>>> import iso8601\n>>> iso8601.parse_date(\"2007-01-25T12:00:00Z\")\ndatetime.datetime(2007, 1, 25, 12, 0, tzinfo=<iso8601.iso8601.Utc ...>)\n>>>",
"from datetime import datetime, timedelta, tzinfo",
"import re",
"__all__ = [\"parse_date\", \"ParseErr... |
#!/usr/bin/env python
| [] | [] |
#!/usr/bin/env python
#
# Copyright 2011 Google Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Aggregates individual email messages into a single incident."""
import logging
from time import strftime
from google.appengine.ext import webapp
from google.appengine.ext.webapp import util
from google.appengine.ext.webapp.mail_handlers import InboundMailHandler
import model
class MailAggregator(InboundMailHandler):
  """Handles incoming mail where each message is delivered individually."""

  # Whether or not to save the message body on initial save.
  SAVE_FULL_TEXT = True
  # Prefix for synthesized Message-IDs when the header is absent.
  FAKE_MESSAGE_ID = 'FAKEMESSAGEID'
  # Format suitable for strftime
  FAKE_MESSAGE_ID_SUFFIX_FORMAT = '%Y%m%d%H%M%S'

  def receive(self, mail):
    """Handles receipt of an email message.

    Deduplicates by Message-ID, persists new messages, and links the
    message to its incident.

    Args:
      mail: Incoming message to parse.
    """
    # Check for delivery dupes, first.
    message_id = mail.original.get('Message-ID')
    if message_id is None:
      # No Message-ID header: synthesize one from the current time.
      message_id = MailAggregator.FAKE_MESSAGE_ID + strftime(
          MailAggregator.FAKE_MESSAGE_ID_SUFFIX_FORMAT)
    message = model.Message.gql('WHERE message_id = :1', message_id).get()
    # If there isn't already a copy, save the email.
    if not message:
      message = model.Message.FromMail(mail, message_id,
                                       MailAggregator.SAVE_FULL_TEXT)
    # Incident association is idempotent and can be repeated.
    message.AssociateMailIncident()
def main():
  """Wires up the inbound-mail handler and runs the WSGI application."""
  logging.getLogger().setLevel(logging.DEBUG)
  application = webapp.WSGIApplication([MailAggregator.mapping()], debug=True)
  util.run_wsgi_app(application)


if __name__ == '__main__':
  main()
| [
[
8,
0,
0.2429,
0.0143,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.3,
0.0143,
0,
0.66,
0.1111,
715,
0,
1,
0,
0,
715,
0,
0
],
[
1,
0,
0.3143,
0.0143,
0,
0.66,
... | [
"\"\"\"Aggregates individual email messages into a single incident.\"\"\"",
"import logging",
"from time import strftime",
"from google.appengine.ext import webapp",
"from google.appengine.ext.webapp import util",
"from google.appengine.ext.webapp.mail_handlers import InboundMailHandler",
"import model"... |
#!/usr/bin/env python
#
# Copyright 2011 Google Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provides models for Au-to-do data types.
This module provides data types for Au-to-do data stored in the App Engine
datastore.
"""
from datetime import datetime
from datetime import timedelta
import email.utils
import logging
import os
import re
from sets import Set
import urllib
from oauth2client.appengine import CredentialsProperty
from google.appengine.api import memcache
from google.appengine.api import taskqueue
from google.appengine.ext import db
import simplejson
import settings
INCIDENT_DEEP_LINK = 'http://%s/#id=' % os.environ.get('HTTP_HOST', 'localhost')
class Incident(db.Model):
  """Describes an incident.

  Incidents are support inquiries of one of several types. Examples include:
  a thread from a mailing list (or Google Group), a Unify ticket, or a Google
  Code Project Hosting issue.

  Attributes:
    title: Title of the incident.
    created: When the incident started (or was first tracked).
    updated: When the incident was last updated.
    resolved: When the incident was resolved.
    status: Current status of the incident (eg. waiting for customer response).
    owner: Owner of the incident.
    author: Person who created the incident.
    mailing_list: Mailing list to which the incident was sent (if from a
        mailing list).
    canonical_link: Reference to the canonical location of the incident, e.g.
        the Google Group page or the Unify ticket.
    suggested_tags: List of tags suggested by the Prediction API/suggester.
    accepted_tags: List of tags approved or added by the user.
    trained_tags: List of accepted_tags that generated training
        examples. Used to detect changes in accepted_tags by the user.
    trained_date: Date when this Incident was last processed for training data.
    training_review: True when the training algorithm should check
        this Incident for new Accepted tags or other changes related
        to the Prediction API.
  """
  title = db.StringProperty(multiline=True)
  created = db.DateTimeProperty()
  updated = db.DateTimeProperty()
  resolved = db.DateTimeProperty()
  status = db.StringProperty(default='new')
  owner = db.StringProperty(default='none')
  author = db.StringProperty()
  mailing_list = db.StringProperty()
  canonical_link = db.StringProperty()
  suggested_tags = db.ListProperty(str)
  accepted_tags = db.ListProperty(str)
  trained_tags = db.ListProperty(str)
  trained_date = db.DateTimeProperty()
  training_review = db.BooleanProperty(default=True)

  # Format used by the class for parsing dates.
  ISO_FORMAT = '%Y-%m-%dT%H:%M:%S'
@staticmethod
def MergeWithParent(parent_incident, children):
  """Merges multiple child incidents into one incident with a common parent.

  When child messages are delivered prior to their parent message (eg.
  In-Reply-To references a message that has not yet been delivered), the
  children will be roots of new incidents. Once the parent message is
  delivered, this method will remove the child incidents and place them under
  the common parent.

  Records the current time in parent_incident.updated to help with
  debounce of user input, and sets parent_incident.training_review to
  True since the merge may have affected the tags.

  Args:
    parent_incident: Parent incident, referenced by each of the children.
    children: One or more messages referencing defunct parents.
  """
  incidents_to_merge = Set()
  for child in children:
    incidents_to_merge.add(child.incident)
    child.incident = parent_incident
    child.put()
  parent_incident.PurgeJsonCache()
  parent_incident.updated = datetime.utcnow()
  parent_incident.training_review = True
  for incident in incidents_to_merge:
    parent_incident.accepted_tags.extend(incident.accepted_tags)
    parent_incident.suggested_tags.extend(incident.suggested_tags)
    parent_incident.trained_tags.extend(incident.trained_tags)
    # Bug fix: previously took max([incident.trained_date,
    # incident.trained_date]) — comparing the value with itself.  Keep
    # the most recent training date across parent and children instead.
    parent_incident.trained_date = max(parent_incident.trained_date,
                                       incident.trained_date)
    # Re-parent the child incident's messages onto the survivor.
    messages = Message.gql('WHERE incident = :1', incident.key())
    for message in messages:
      message.incident = parent_incident
      message.put()
    if not parent_incident.key() == incident.key():
      incident.delete()
  # De-duplicate the merged tag lists.
  parent_incident.accepted_tags = list(set(parent_incident.accepted_tags))
  parent_incident.suggested_tags = list(set(parent_incident.suggested_tags))
  # Bug fix: previously rebuilt trained_tags from suggested_tags,
  # clobbering the record of which tags were actually trained.
  parent_incident.trained_tags = list(set(parent_incident.trained_tags))
  parent_incident.put()
def Overlay(self, other):
"""Overwrite this incident's fields with other incident's fields.
Records current time as time of update (incident.updated).
Does not overwrite messages.
Args:
other: Incident from which to pull values.
"""
self.title = other.title
self.owner = other.owner
self.status = other.status
self.created = other.created
self.updated = datetime.utcnow()
self.training_review = other.training_review
self.resolved = other.resolved
self.suggested_tags = other.suggested_tags
self.accepted_tags = other.accepted_tags
self.trained_tags = other.trained_tags
self.trained_date = other.trained_date
self.canonical_link = other.canonical_link
def GetDict(self):
"""Return a dict representation of this incident, with messages.
This will return a copy from memcache if it exists (and caching is
enabled), and update the cache if not present.
Returns:
Dict representing the incident.
"""
# Check for memcached copy first.
key = self.GetJsonModelKey()
cached = memcache.get(key)
if cached and settings.USE_MEMCACHE_FOR_JSON_MODELS:
return cached
model = {
'title': self.title,
'owner': self.owner,
'status': self.status,
'created': self.created,
'updated': self.updated,
'resolved': self.resolved,
'suggested_tags': self.suggested_tags,
'accepted_tags': self.accepted_tags,
'trained_tags': self.trained_tags,
'trained_date': self.trained_date,
'training_review': self.training_review,
'canonical_link': self.canonical_link,
'messages': []}
if self.key():
model['id'] = self.key().id()
if self.message_set is not None:
for m in self.message_set:
model['messages'].append(m.GetDict())
memcache.set(key, model)
return model
def PurgeJsonCache(self):
"""Purges the cached JSON representation of the incident."""
key = self.GetJsonModelKey()
logging.info('Purging cache for incident:' + key)
memcache.delete(key)
def GetJsonModelKey(self):
"""Returns the key pointing to the instance's JSON representation.
Returns:
JSON model key.
"""
return settings.MEMCACHE_VERSION_PREFIX + str(self.key().id())
def ToTaskDict(self, body=None):
"""Parse an incident into a Tasks API dictionary.
Args:
body: Optional dictionary to update.
Returns:
Dictionary representing the incident.
"""
body = body or {}
body['title'] = self.title
body['notes'] = self.GetDeepLink()
if self.resolved:
body['status'] = 'completed'
body['completed'] = self.GetDateTime(self.resolved)
else:
body['status'] = 'needsAction'
if 'completed' in body:
body.pop('completed')
return body
def GetDeepLink(self):
"""Return a deeplink to the incident.
Returns:
Deeplink to the incident.
"""
return '%s%s' % (INCIDENT_DEEP_LINK, self.key().id())
@staticmethod
def FromJson(json):
"""Convert the given JSON representation to an Incident.
Sets 'incident.training_review' to True, assuming that anything
could have changed on the client which sent the JSON, including
the Tags.
Does not include messages from given JSON, as messages are read-only.
Args:
json: JSON representation to convert.
Returns:
Incident with all the properties of the given JSON representation.
"""
retval = simplejson.loads(json)
incident = Incident(
title=retval.get('title'),
owner=retval.get('owner'),
status=retval.get('status'),
suggested_tags=retval.get('suggested_tags'),
accepted_tags=retval.get('accepted_tags'),
trained_tags=retval.get('trained_tags'),
training_review=True,
canonical_link=retval.get('canonical_link'))
if retval.get('created') is not None:
incident.created = Incident.ParseDate(retval.get('created'))
if retval.get('updated') is not None:
incident.updated = Incident.ParseDate(retval.get('updated'))
if retval.get('resolved') is not None:
incident.resolved = Incident.ParseDate(retval.get('resolved'))
if retval.get('trained_date') is not None:
incident.trained_date = Incident.ParseDate(retval.get('trained_date'))
return incident
@staticmethod
def ParseDate(date_string):
"""Converts a string into the ISO date format.
Args:
date_string: ISO-formatted date string.
Returns:
Native datetime object.
"""
if '.' in date_string:
(dt, microsecs) = date_string.split('.', 1)
if len(microsecs) > 3:
microsecs = microsecs[:3]
else:
dt = date_string
microsecs = 0
return_datetime = datetime.strptime(dt, Incident.ISO_FORMAT)
return_datetime += timedelta(microseconds=int(microsecs))
return return_datetime
@staticmethod
def GetDateTime(time):
"""Convert a datetime.datetime object to a Tasks API compatible string.
Args:
time: datetime.datetime to convert.
Returns:
String representing the datetime.datetime object.
"""
date_str = time.isoformat()
if len(date_str.split('.')) == 1:
date_str += '.000'
return date_str + 'Z'
class Message(db.Model):
"""Describes a message on from an incident.
Attributes:
message_id: RFC822 message ID. Populated when the message is an email.
in_reply_to: RFC822 message ID that the message references most recently.
references: Series of RFC822 message IDs, in reverse chronological order,
that are referenced by the message.
incident: Incident that the message belongs to.
title: Title of the message.
author: Author of the message.
body: Body of the message in plaintext.
sent: When the message was sent.
mailing_list: Mailing list to which the message was sent (if the message
is an email).
canonical_link: Reference to the canonical location of the message, e.g. a
the Google Group page or the Unify message.
"""
message_id = db.StringProperty()
in_reply_to = db.StringProperty()
references = db.TextProperty()
incident = db.ReferenceProperty(Incident)
title = db.StringProperty(multiline=True)
author = db.EmailProperty()
body = db.TextProperty()
sent = db.DateTimeProperty()
mailing_list = db.StringProperty()
canonical_link = db.StringProperty()
def AssociateMailIncident(self):
"""Associates a message with an incident, using RFC822 message IDs.
If the message refers to an existing incident, adds it to the
incident. If the message does not refer to another message, or
refers to a message not in the datastore, makes a new incident.
Sets the 'incident.updated' field to utcnow.
Sets the 'incident.training_review' to True since the new message
can expand the current training set for the Prediction API.
If the message is referenced by other incidents, merges those into
the incident.
"""
parent = Message.gql('WHERE message_id = :1', self.in_reply_to).get()
if parent and parent.incident:
logging.debug('Parent found: ' + parent.incident.title)
self.incident = parent.incident
self.put()
parent.incident.PurgeJsonCache()
parent.incident.updated = datetime.utcnow()
parent.incident.training_review = True
parent.incident.put()
# Merge other incidents that point to this one, into this incident.
children = Message.gql('WHERE in_reply_to = :1', self.message_id)
Incident.MergeWithParent(parent.incident, children)
else:
children = Message.gql('WHERE in_reply_to = :1 ORDER BY sent ASC',
self.message_id)
if children.count():
logging.debug('Found child messages: ' + str(children.count()))
# Update new message to refer to the oldest existing incident that
# references it.
incident = children[0].incident
self.incident = incident
self.put()
# And update the incident with earlier metadata.
incident.created = self.sent
incident.updated = datetime.utcnow()
incident.training_review = True
incident.title = self.title
incident.author = self.author
incident.mailing_list = self.mailing_list
incident.canonical_link = self.canonical_link
incident.put()
Incident.MergeWithParent(incident, children)
else:
logging.debug('New incident from: ' + self.message_id)
# Or it must be a new incident...
incident = Incident(title=self.title,
author=self.author,
created=self.sent,
mailing_list=self.mailing_list,
canonical_link=self.canonical_link)
incident.put()
self.incident = incident.key()
self.put()
logging.info('Adding to task queue incident_key=' +
str(self.incident.key()))
taskqueue.add(queue_name='predictor', url='/tasks/suggest',
params={'incident_key': str(self.incident.key())})
def ReferencesList(self):
"""Provides a list of RFC822 message IDs referenced by the message.
Returns:
List of RFC822 message IDs, in reverse chronological order, referenced by
the message.
"""
return self.references.split(',')
@staticmethod
def FromMail(mail, message_id, store_body=False):
"""Saves a mail message to the datastore.
Args:
mail: Incoming message to parse and save.
message_id: Message-ID of the incoming message.
store_body: Whether or not to store the message body.
Returns:
Saved message.
"""
message = Message(message_id=message_id)
message.canonical_link = Message.GetCanonicalLink(message_id)
parsed_tz_tuple = email.utils.parsedate_tz(mail.date)
time_tz = email.utils.mktime_tz(parsed_tz_tuple)
message.sent = datetime.utcfromtimestamp(time_tz)
if mail.original.get('Subject') and mail.subject:
message.title = mail.subject
m = re.search('.* <(.*)>', mail.sender)
if m:
message.author = m.group(1)
else:
message.author = mail.sender
logging.debug('Received a message from: ' + message.author)
if store_body:
message.body = Message.GetMailBody(mail, 'text/plain')
message.mailing_list = Message.GetMailingList(mail)
references = mail.original.get_all('References')
if references:
message.references = ','.join(references)
logging.debug(message.references)
message.in_reply_to = Message.GetInReplyTo(mail, references)
message.put()
Message.RecordMailingList(message)
Message._LogMessageIdDetails(message)
return message
@staticmethod
def GetCanonicalLink(message_id):
"""Constructs the canonical link for an email.
Args:
message_id: Message-ID of the incoming message.
Returns:
Canonical link for the email.
"""
base = 'https://mail.google.com/mail/#search/rfc822msgid%3A+'
escaped = urllib.quote_plus(message_id)
return base + escaped
@staticmethod
def GetMailBody(mail, body_type):
"""Retrieves the relevant mail body from the email.
Args:
mail: Incoming message to parse.
body_type: Content type of the body to retrieve.
Returns:
Relevant mail body.
"""
return list(mail.bodies(body_type))[0][1].decode()
@staticmethod
def GetInReplyTo(mail, references):
"""Retrieves the functional In-Reply-To header.
If an actual In-Reply-To header is not found, one will be constructed by
using the last entry of the References header, if it exists.
Args:
mail: Incoming message to parse.
references: Mail references, from the References header.
Returns:
Functional In-Reply-To value.
"""
in_reply_to = mail.original.get('In-Reply-To')
if not in_reply_to and references:
in_reply_to = references[-1].split('\n')[-1].split(' ')[-1]
logging.debug('Using last reference instead of In-Reply-To')
logging.debug(in_reply_to)
if in_reply_to:
single_line = in_reply_to.replace('\n', '')
return single_line
return None
@staticmethod
def GetMailingList(mail):
"""Retrieves the mailing list to which the message was sent.
Will attempt to use one of two headers to find the mailing list.
Args:
mail: Incoming message to parse.
Returns:
Mailing list address.
"""
if mail.original.get('Mailing-list'):
m = re.search('list (.+);', mail.original.get('Mailing-list'))
if m:
return m.group(1)
elif mail.original.get('List-Post'):
m = re.search('<mailto:(.+)>', mail.original.get('List-Post'))
if m:
return m.group(1)
return None
@staticmethod
def RecordMailingList(message):
"""Records existence of new mailing lists not previously recorded.
If the incoming message does not have a mailing list, this is a no-op.
Args:
message: Datastore representation of the incoming message.
"""
if message.mailing_list:
logging.debug('Mailing-list: ' + message.mailing_list)
list_entry = List.gql('WHERE email = :1', message.mailing_list).get()
if not list_entry:
logging.debug('List not found, adding entry')
list_entry = List(email=message.mailing_list)
list_entry.put()
@staticmethod
def _LogMessageIdDetails(message):
"""Saves debug information for the Message-ID and related fields.
Args:
message: Datastore representation of the incoming message.
"""
if message.message_id:
logging.debug('Message-ID: ' + message.message_id)
if message.in_reply_to:
logging.debug('In-Reply-To: ' + message.in_reply_to)
if message.references:
logging.debug('References: ' + message.references)
def GetDict(self):
"""Return a dict representation of this message.
This will return a copy from memcache if it exists (and caching is
enabled), and update the cache if not present.
Returns:
Dict representing the incident.
"""
key = self.GetJsonModelKey()
cached = memcache.get(key)
if cached and settings.USE_MEMCACHE_FOR_JSON_MODELS:
return cached
model = {
'message_id': self.message_id,
'in_reply_to': self.in_reply_to,
'references': self.references,
'title': self.title,
'author': self.author,
'body': self.body,
'sent': self.sent,
'mailing_list': self.mailing_list,
'canonical_link': self.canonical_link}
memcache.set(key, model)
return model
def GetJsonModelKey(self):
"""Returns the key pointing to the instance's JSON representation.
Returns:
JSON model key.
"""
return settings.MEMCACHE_VERSION_PREFIX + str(self.key().id())
class List(db.Model):
"""Describes a mailing list.
Attributes:
name: Name of the mailing list.
email: Email address of the mailing list.
"""
name = db.StringProperty()
email = db.EmailProperty()
class Tag(db.Model):
"""Describes a tag.
A tag includes a model and a category. The model may be explicitly
stated or, if it is missing, all tags with no explicit model are
implicitly part of the same unspecified model.
Attributes:
name: Name of the tag and key of model object. Format:
["model""_MODEL_MARKER"]"category". You can only set 'name'
when you create the Tag because it is the key.
example_count: Total count of current examples (with Accepted tags).
trained_count: Count of examples at last training. At the moment
of training, trained_count = example_count.
trained_date: When this tag's examples were last sent to the Prediction API
"""
example_count = db.IntegerProperty(default=0)
trained_count = db.IntegerProperty(default=0)
trained_date = db.DateTimeProperty()
# _DEFAULT_MODEL is used when the user does not specify a model.
# This app uses this string to name a training set for the Prediction
# API, creating a file on Google Storage with this prefix. This
# string also appears on the User Settings page to describe the
# model created when the user does not specify a model. You might
# want to change this string to localize it for presentation.
_DEFAULT_MODEL = 'unspecified_model'
# _DEFAULT_CATEGORY should never be seen or assigned since the UI
# should always guarantee a non-blank Tag name. Provided as a safe
# fallback. There is no need to change it.
_DEFAULT_CATEGORY = 'unspecified_category'
# _MODEL_MARKER defines the character which splits the model from
# the category. If you change this then you must also change the
# Javascript which enforces the tag definition in ui.js:
# google.devrel.samples.autodo.Bindings.bindTagTextInput
_MODEL_MARKER = '-'
@property
def name(self):
"""Get the Key name."""
return self.key().name()
@classmethod
def ModelCategory(cls, tag):
"""Split a tag into a model and category.
The goal is to isolate all the knowledge about how to parse a tag
and model within Tag so that other functions don't have to change
if we modify the format.
Args:
tag: String, the tag as typed by the user or Tag.name.
Returns:
Dictionary of [model, category, explicit]
model: Group of competing tags.
category: A classification within a model.
explicit: True if model was specified,
False if we applied default model name.
"""
logging.info('TAG=%s', tag)
split = dict(zip(('model', 'category'),
tag.split(cls._MODEL_MARKER)))
if 'category' in split:
split['explicit'] = True
else:
split['explicit'] = False
split['category'] = split['model']
split['model'] = cls._DEFAULT_MODEL
if not split['model']:
split['model'] = cls._DEFAULT_MODEL
if not split['category']:
split['category'] = cls._DEFAULT_CATEGORY
return split
@classmethod
def ModelMatches(cls, model, tag):
"""Determine if a tag is a category of a model."""
if cls._MODEL_MARKER not in tag and (
not model or model == cls._DEFAULT_MODEL):
return True
else:
return tag.startswith(model + cls._MODEL_MARKER)
@classmethod
def CreateMissingTags(cls, incident):
"""Create Tag Instances for tags in the given incident.
Tags could have come from the Prediction API or the user.
Args:
incident: Incident to pull tags from for creation.
"""
tags = set(incident.suggested_tags)
tags.update(incident.accepted_tags)
for tag in tags:
# Use negative example_count to signal a new tag.
tag_instance = cls.get_or_insert(tag, example_count=(-1))
if tag_instance.example_count < 0:
tag_instance.example_count = 0
tag_instance.put()
SuggestionModel.CreateMissingModel(tag)
class Credentials(db.Model):
"""Credentials Datastore class to store user's credentials information.
Attributes:
credentials: User's OAuth 2.0 credentials.
email: User's email.
user_id: User's ID (also used as key).
"""
credentials = CredentialsProperty()
email = db.StringProperty()
@property
def user_id(self):
return self.key().name()
class UserSettings(db.Model):
"""Store user's settings.
Attributes:
tasks_credentials: Tasks API scoped credentials.
email: User's email (also used as key).
add_to_tasks: Whether or not to automatically add assigned incidents to
the user's task list.
task_list_id: ID of the task list to add the incidents to.
"""
tasks_credentials = CredentialsProperty()
add_to_tasks = db.BooleanProperty(default=False)
task_list_id = db.StringProperty(default='@default')
@property
def email(self):
return self.key().name()
class IncidentTask(db.Model):
"""Store link between an incident and a user's Task.
Attributes:
incident_id: ID of the incident (also used as key).
task_id: ID of the user's task.
task_list_id: ID of the user's task list.
owner: Owner of this IncidentTask.
"""
task_id = db.StringProperty()
task_list_id = db.StringProperty(default='@default')
owner = db.StringProperty()
@property
def incident_id(self):
return self.key().name()
class SuggestionModel(db.Model):
"""Track data related to a model that was sent to the Prediction API.
Attributes:
name: The name of the model. Read-only. Set at creation time.
training_file: Name of the Google Storage object for this model.
Empty if never sent to Google Storage.
training_date: Time and date when training was confirmed complete.
training_tags: Tags included in the original training set.
export_file: Name of downloadble file containing training set.
Empty if never exported.
export_date: Time and date when data last exported.
export_tags: Tags included with exported data set.
ui_tags: Tags to be shown in the UI as examples of this model.
These tags could include new tags not yet added to a training or export.
"""
training_file = db.StringProperty()
training_date = db.DateTimeProperty()
training_tags = db.ListProperty(str)
training_examples = db.IntegerProperty(default=0)
export_file = db.StringProperty()
export_date = db.DateTimeProperty()
export_tags = db.ListProperty(str)
export_examples = db.IntegerProperty(default=0)
ui_tags = db.ListProperty(str)
@property
def name(self):
"""Get the Key name."""
return self.key().name()
@classmethod
def CreateMissingModel(cls, tag):
"""Create a new model for a tag, if necessary, and add tag to ui list.
Args:
tag: String name of a specific Tag.
"""
model_name = Tag.ModelCategory(tag)['model']
suggestion_model = cls.get_or_insert(model_name)
suggestion_model.AddUITags([tag])
suggestion_model.put()
def AddUITags(self, tags):
"""Add one or more tags to this model for display in UI.
This is strictly a convenience function for the UI and does not create a
canonical list. The canonical lists are in training_tags and export_tags
which contain all the tags present at the generation of those training sets.
If the model name in the tag does not match this current model
key_name then no change to the entity. If the model name matches
then the tag will be added to the ui_tags set.
Args:
tags: list of Strings
"""
ui_tags = [tag for tag in tags if Tag.ModelMatches(self.name, tag)]
self.ui_tags = list(set(ui_tags))
| [
[
8,
0,
0.0225,
0.0059,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.0296,
0.0012,
0,
0.66,
0.0435,
426,
0,
1,
0,
0,
426,
0,
0
],
[
1,
0,
0.0307,
0.0012,
0,
0.66... | [
"\"\"\"Provides models for Au-to-do data types.\n\nThis module provides data types for Au-to-do data stored in the App Engine\ndatastore.\n\"\"\"",
"from datetime import datetime",
"from datetime import timedelta",
"import email.utils",
"import logging",
"import os",
"import re",
"from sets import Set... |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
import sys,os
import re
import ConfigParser
import memcache
import urlparse
import smtplib
from email.mime.text import MIMEText
import urlparse
from BaseHTTPServer import HTTPServer,BaseHTTPRequestHandler
class RequestHandler(BaseHTTPRequestHandler):
def sendmail(self, email):
sender = 'admin@xxx.com'
mailto = email
#邮件信息
msg =MIMEText("new password is 365im")
msg['Subject'] = 'reg ok'
msg['to'] = mailto
msg['From'] = sender
#连接发送服务器
smtp = smtplib.SMTP('mail.xxx.com')
smtp.login('admin@xxx.com', 'password')
#发送
smtp.sendmail(sender,mailto,msg.as_string())
smtp.quit()
print 'Send OK'
def checkAccountValid(self, email): #返回0表示此邮箱可用
if len(email) < 6: #x@a.cn,最少6个符
return -1
if re.match("^.+\\@(\\[?)[a-zA-Z0-9\\-\\.]+\\.([a-zA-Z]{2,3}|[0-9]{1,3})(\\]?)$", email) == None:
return -2
value = mc.get(email)
if (value == None):
return 0
return 1
def _writeheaders(self):
self.send_response(200)
self.send_header('Content-type','text/html')
self.end_headers()
def do_HEAD(self):
self._writeheaders()
def do_GET(self):
self._writeheaders()
#4011,4012,4013,4014 total 4 xmpp client, idle or busy can get from memcached
#self.wfile.write(self.path[0:14])
ret='0'
if self.path[0:12] == '/reg_new_user': #http://xxx/reg_new_user/user,pw,time,code
url=self.path[12:];
#ret=str2;
strlist=url.split(',')
if len(strlist)!=4:
ret='0';
else:
mail=strlist[0]
#pw=strlist[1]
time=strlist[2]
code=strlist[3]
if (self.checkAccountValid(mail) == 0):
value="365im"+"|"+time+code
mc.set(mail, value)
else:
ret='0'
self.wfile.write(ret)
host = ''
port = 5300
memcache_host='ip:port'
try:
config = ConfigParser.ConfigParser()
config.read('config.ini')
host=config.get('serverinfo', 'host')
port=config.get('serverinfo', 'port')
memcache_host = config.get('serverinfo', 'memcache')
print 'Init OK'
serveraddr = (host, int(port))
mc = memcache.Client([memcache_host],debug=0)
srvr = HTTPServer(serveraddr,RequestHandler)
srvr.serve_forever()
except:
print 'Open failled'
exit()
| [
[
1,
0,
0.0128,
0.0128,
0,
0.66,
0,
509,
0,
2,
0,
0,
509,
0,
0
],
[
1,
0,
0.0256,
0.0128,
0,
0.66,
0.1111,
540,
0,
1,
0,
0,
540,
0,
0
],
[
1,
0,
0.0385,
0.0128,
0,
... | [
"import sys,os",
"import re",
"import ConfigParser",
"import memcache",
"import urlparse",
"import smtplib",
"from email.mime.text import MIMEText",
"import urlparse",
"from BaseHTTPServer import HTTPServer,BaseHTTPRequestHandler",
"class RequestHandler(BaseHTTPRequestHandler):\n def sendmail(s... |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
import sys,os
import re
import ConfigParser
import memcache
import urlparse
import smtplib
from email.mime.text import MIMEText
import urlparse
from BaseHTTPServer import HTTPServer,BaseHTTPRequestHandler
class RequestHandler(BaseHTTPRequestHandler):
def sendmail(self, email):
sender = 'admin@xxx.com'
mailto = email
#邮件信息
msg =MIMEText("new password is 365im")
msg['Subject'] = 'reg ok'
msg['to'] = mailto
msg['From'] = sender
#连接发送服务器
smtp = smtplib.SMTP('mail.xxx.com')
smtp.login('admin@xxx.com', 'password')
#发送
smtp.sendmail(sender,mailto,msg.as_string())
smtp.quit()
print 'Send OK'
def checkAccountValid(self, email): #返回0表示此邮箱可用
if len(email) < 6: #x@a.cn,最少6个符
return -1
if re.match("^.+\\@(\\[?)[a-zA-Z0-9\\-\\.]+\\.([a-zA-Z]{2,3}|[0-9]{1,3})(\\]?)$", email) == None:
return -2
value = mc.get(email)
if (value == None):
return 0
return 1
def _writeheaders(self):
self.send_response(200)
self.send_header('Content-type','text/html')
self.end_headers()
def do_HEAD(self):
self._writeheaders()
def do_GET(self):
self._writeheaders()
#4011,4012,4013,4014 total 4 xmpp client, idle or busy can get from memcached
#self.wfile.write(self.path[0:14])
ret='0'
if self.path[0:12] == '/reg_new_user': #http://xxx/reg_new_user/user,pw,time,code
url=self.path[12:];
#ret=str2;
strlist=url.split(',')
if len(strlist)!=4:
ret='0';
else:
mail=strlist[0]
#pw=strlist[1]
time=strlist[2]
code=strlist[3]
if (self.checkAccountValid(mail) == 0):
value="365im"+"|"+time+code
mc.set(mail, value)
else:
ret='0'
self.wfile.write(ret)
host = ''
port = 5300
memcache_host='ip:port'
try:
config = ConfigParser.ConfigParser()
config.read('config.ini')
host=config.get('serverinfo', 'host')
port=config.get('serverinfo', 'port')
memcache_host = config.get('serverinfo', 'memcache')
print 'Init OK'
serveraddr = (host, int(port))
mc = memcache.Client([memcache_host],debug=0)
srvr = HTTPServer(serveraddr,RequestHandler)
srvr.serve_forever()
except:
print 'Open failled'
exit()
| [
[
1,
0,
0.0128,
0.0128,
0,
0.66,
0,
509,
0,
2,
0,
0,
509,
0,
0
],
[
1,
0,
0.0256,
0.0128,
0,
0.66,
0.1111,
540,
0,
1,
0,
0,
540,
0,
0
],
[
1,
0,
0.0385,
0.0128,
0,
... | [
"import sys,os",
"import re",
"import ConfigParser",
"import memcache",
"import urlparse",
"import smtplib",
"from email.mime.text import MIMEText",
"import urlparse",
"from BaseHTTPServer import HTTPServer,BaseHTTPRequestHandler",
"class RequestHandler(BaseHTTPRequestHandler):\n def sendmail(s... |
import sys, string, re, Queue
arith = ['sub', 'div', 'mod', 'cmple', 'add', 'mul', 'cmpeq', 'cmplt']
operators = ['-', '/', '%', '<=', '+', '*', '==', '<']
arith1 = ['neg']
local_size = 0;
# get operand
def getOperand(t, sline, access_local):
#GP
if sline[t] == 'GP':
print '(char*)global',
return t+1
#FP
elif sline[t] == 'FP':
if access_local:
print '(char*)&local[' + str(local_size/8 -1) + ']',
else:
print '(char*)param',
return t+1
#constant
elif sline[t].isdigit():
print sline[t],
return t+1
#address offsets and field offsets
elif sline[t].endswith('_base') or sline[t].endswith('_offset'):
if sline[t+1][0] == '-':
print '(' + str(int(sline[t+1])+8) + ')',
return -(t+2)
else:
print str(int(sline[t+1])-8),
return t+2
#register name
elif sline[t][0] == '(':
print 'r' + sline[t].strip('()'),
return t+1
#code label
elif sline[t][0] == '[':
print 'instr_' + sline[t].strip('[]'),
return t+1
#local variables
else:
if sline[t+1][0] == '-':
print 'local[' + str((local_size-int(sline[t+1].strip('-')))/8) + ']',
else:
print 'param[' + str(int(sline[t+1])/8-1) + ']',
return t+2
# get next operand start
def getStart(t, sline):
#GP
if sline[t] == 'GP':
return t+1
#FP
elif sline[t] == 'FP':
return t+1
#constant
elif sline[t].isdigit():
return t+1
#address offsets and field offsets
elif sline[t].endswith('base') or sline[t].endswith('_offsets'):
if sline[t+1][0] == '-':
return -(t+2)
else:
return t+2
#register name
elif sline[t][0] == '(':
return t+1
#code label
elif sline[t][0] == '[':
return t+1
#local variables
else:
return t+2
#----------------- Main -----------------#
#if len(sys.argv) != 2:
# print "please specify input file name"
# sys.exit(0)
#
#ifile = open(sys.argv[1], 'r')
#parameters
params = Queue.LifoQueue()
params_n = 0
parsing_main = 0
# Print out header of the file
print '#include <stdio.h>\n\
#include <stdlib.h>\n\
#include <string.h>\n\
#define WriteLine() printf("\\n");\n\
#define WriteLong(x) printf(" %lld", x);\n\
#define ReadLong(a) if (fscanf(stdin, "%lld", &a) != 1) a = 0;\n\
#define long long long\n\n'
print 'long global[4096];\n'
# parse the file line by line
#for line in ifile:
for line in sys.stdin:
sline = re.split(': | |#', line.rstrip('\n').lstrip(' '))
if sline[2] == 'nop':
continue
#print label for next instruction
if sline[2] != 'enter' and sline[2] != 'entrypc':
print 'instr_' + sline[1] + ':;\n\t',
#function start
if sline[2] == 'enter':
assert int(sline[3]) % 8 == 0, 'operand not divisible by 8';
if not parsing_main:
print 'void func_' + sline[1] + '(long* param) {\n',
else:
print 'void main() {\n',
if (sline[3] != '0'):
print 'long local[' + str(int(sline[3])/8) + '];\n',
local_size = int(sline[3]);
parsing_main = 0
#main start
if sline[2] == 'entrypc':
parsing_main = 1
#function return
elif sline[2] == 'ret':
print 'return;\n}\n',
#arithmatic
# elif sline[2] in arith:
# print 'long r' + sline[1] + ' =',
## t = getOperand(3, sline, 0)
## print operators[arith.index(sline[2])],
## if (t < 0):
## getOperand(-t, sline, 1)
## else:
## getOperand(t, sline, 0)
# t = getStart(3, sline)
# if (t < 0):
# getOperand(-t, sline, 1)
# else:
# getOperand(t, sline, 0)
# print operators[arith.index(sline[2])],
# getOperand(3, sline, 0)
# print ';\n',
elif sline[2] in arith:
print 'long r' + sline[1] + ' =',
t = getOperand(3, sline, 0)
print operators[arith.index(sline[2])],
if (t < 0):
getOperand(-t, sline, 1)
else:
getOperand(t, sline, 0)
print ';\n',
elif sline[2] in arith1:
print 'long r' + sline[1] + ' =',
t = getOperand(3, sline, 0)
print ' * (-1);\n',
#branch
elif sline[2] == 'br':
print 'goto ',
getOperand(3, sline, 0)
print ';\n',
elif sline[2] == 'blbs':
print 'if (',
t = getOperand(3, sline, 0)
print '!= 0) goto',
getOperand(t, sline, 0)
print ';\n',
elif sline[2] == 'blbc':
print 'if (',
t = getOperand(3, sline, 0)
print '== 0) goto',
getOperand(t, sline, 0)
print ';\n',
#data movement
elif sline[2] == 'load':
print 'long r' + sline[1] + ' = *(long*)',
getOperand(3, sline, 0)
print ';\n',
elif sline[2] == 'move':
t = getStart(3, sline);
getOperand(t, sline, 0)
print ' = ',
getOperand(3, sline, 0)
print ';\n',
elif sline[2] == 'store':
print '*(long*)',
t = getStart(3, sline)
getOperand(t, sline, 0)
print ' =',
getOperand(3, sline, 0)
print ';\n',
#I/O
elif sline[2] == 'write':
print 'WriteLong(',
getOperand(3, sline, 0)
print ');\n',
elif sline[2] == 'wrl':
print 'WriteLine();\n',
elif sline[2] == 'read':
#TODO: read didn't appear in all any tests.. need to be tested
print 'long r' + sline[1] + ';\n\t',
print 'ReadLong( r' + sline[1],
print ');\n',
#Parameter and call
elif sline[2] == 'param':
print 'long r' + sline[1] + ' = ',
getOperand(3, sline, 0)
print ';//input parameter\n',
params.put(sline[1])
params_n += 1
elif sline[2] == 'call':
param_name = 'param_' + sline[1]
print 'long* ' + param_name + ' = (long*)malloc(sizeof(long)*' + str(params_n+1) + ');\n',
params_n = 0;
while not params.empty():
tt = params.get();
print 'memcpy(' + param_name + '+' + str(params_n+1) + ', &r' + tt + ', sizeof(long));\n',
params_n += 1
params_n = 0
print 'func_' + sline[3].strip('[]') + '(' + param_name + ');\n',
print 'free (' + str(param_name) + ');\n',
sys.exit(0)
| [
[
1,
0,
0.0042,
0.0042,
0,
0.66,
0,
509,
0,
4,
0,
0,
509,
0,
0
],
[
14,
0,
0.0167,
0.0042,
0,
0.66,
0.0833,
899,
0,
0,
0,
0,
0,
5,
0
],
[
14,
0,
0.0209,
0.0042,
0,
... | [
"import sys, string, re, Queue",
"arith = ['sub', 'div', 'mod', 'cmple', 'add', 'mul', 'cmpeq', 'cmplt']",
"operators = ['-', '/', '%', '<=', '+', '*', '==', '<']",
"arith1 = ['neg']",
"local_size = 0;",
"def getOperand(t, sline, access_local):\n#GP\n if sline[t] == 'GP':\n print('(char*)global',)\n ... |
import logging
from web.webbase import *
from classes import classes
from imports.n3tagvalue import svalue
def gettag(t):
return t if t[0].isalpha() else t[1:]
def getprop(t):
t=gettag(t)
ns = 'ofx' if t.isupper() else 'a3'
return ns+':'+t
def getclass(items,typ,acctids,dtstart,dtend,uniqueids,fltrextension=''):
    """Collect objects in items
    typ - the class to be collected as defined in classes module.
    acctids - limit results to lines from ACCTID which is one of the values in the list
    dtstart, dtend - in the class definition look for all the tags that make up the key for an object.
        if one of those tags begin with DT then it is taken to be the time-key (e.g. DTASOF).
        take only lines that the time-key falls in the range.
    fltrextension - manually define additional SPARQL filter lines.
    """
    c=classes[typ]
    # ms: tags every matching line must carry; a fresh list, so append is safe.
    ms=c['key']+c['mandatory']
    fltr=""
    if acctids and 'ACCTID' not in ms: ms.append('ACCTID')
    # Small id sets are filtered inside the SPARQL query; larger ones are
    # post-filtered in Python below (the generated OR-chain would get huge).
    if acctids and len(acctids)<10:
        fltr+='filter ('
        fltr+='( ?ACCTID = "'+'" ) || ( ?ACCTID = "'.join(acctids)+'" )'
        fltr+=')\n'
    if dtstart or dtend:
        # The time-key is the first key tag starting with "DT" (e.g. DTASOF).
        dtkey=''
        for k in c['key']:
            if k.upper()[0:2]=="DT":
                dtkey=k
                break
        if dtstart and dtkey:
            d,t=svalue(dtkey,list(dtstart)[0])
            fltr+='filter ( ?%s >= "%s"%s )\n'%(dtkey,d,t)
        if dtend and dtkey:
            dtend=list(dtend)[0]
            # make sure dtend refers to the last moment of the day
            if len(dtend)==8 or len(dtend)==6:
                dtend+='235959'
            d,t=svalue(dtkey,dtend)
            fltr+='filter ( ?%s <= "%s"%s )\n'%(dtkey,d,t)
    if uniqueids and 'UNIQUEID' not in ms: ms.append('UNIQUEID')
    if uniqueids and len(uniqueids)<10:
        fltr+='filter ('
        fltr+='( ?UNIQUEID = "'+'" ) || ( ?UNIQUEID = "'.join(uniqueids)+'" )'
        fltr+=')\n'
    fltr+=fltrextension
    os=c['optional']
    vs=ms+os
    #q="""PREFIX ofx:<http://www.w3.org/2000/10/swap/pim/ofx#>
    #PREFIX a3:<http://code.google.com/p/3account/wiki/Schema#>
    #PREFIX xsd:<http://www.w3.org/2001/XMLSchema#>
    # Build: SELECT DISTINCT over all tags; mandatory tags as plain triple
    # patterns, optional tags wrapped in OPTIONAL blocks.
    q="""select distinct"""
    for v in vs:
        q+=" ?"+gettag(v)
    q+=" {\n"
    for v in ms:
        q+=" ?x "+getprop(v)+" ?"+gettag(v)+".\n"
    for v in os:
        q+="optional { ?x "+getprop(v)+" ?"+gettag(v)+"}.\n"
    q+=fltr
    q+="}"
    logging.info("SPARQL query:\n%s"%q)
    rows=con.query(q)
    # do post filtering on acctids and uniqueids in case they have more than
    # 10 elements
    # there is no such problem with dtstart/end
    for row in rows:
        # Row cells are either {'value':...} dicts or lists, depending on the
        # query backend; normalise before the membership test.
        if (acctids and
            (row['ACCTID']['value']
             if isinstance(row['ACCTID'],dict)
             else row['ACCTID'][0])
            not in acctids): continue
        if (uniqueids and
            (row['UNIQUEID']['value']
             if isinstance(row['UNIQUEID'],dict)
             else row['UNIQUEID'][0])
            not in uniqueids): continue
        row2dict(items,row,c['key'],vs,typ=typ)
def getassets(items,uniqueids):
    """Fetch 'asset' objects for *uniqueids* into *items*, then normalise:
    derive 'assetClass' from the FI-specific or generic tag, and append the
    subtype (e.g. the STOCKTYPE value) to the SECTYPE label."""
    if uniqueids:
        getclass(items,'asset',None,None,None,uniqueids)
    for obj in items.itervalues():
        if obj['type']!='asset':
            continue
        # Prefer the financial-institution-specific asset class tag.
        if 'assetClass' not in obj:
            for tag in ('FIASSETCLASS','ASSETCLASS'):
                if tag in obj:
                    obj['assetClass']=obj[tag]
                    break
        if 'SECTYPE' in obj:
            subtype=obj['SECTYPE'][0]+'TYPE'
            if subtype in obj:
                obj['SECTYPE'][0]=obj['SECTYPE'][0]+' '+obj[subtype][0]
def getassetspercents(items,uniqueids):
    """Load assets for *uniqueids* into *items* and attach to each a
    'percent' dict mapping every ASSETCLASS to its percentage of the asset."""
    getassets(items,uniqueids)
    percents={}
    getclass(percents,'assetpercent',None,None,None,uniqueids)
    for entry in percents.itervalues():
        asset=items[entry['UNIQUEID'][0]]
        breakdown=asset.setdefault('percent',{})
        breakdown[entry['ASSETCLASS'][0]]=entry['PERCENT'][0]
def getpositions(items,acctids,dtstart,dtend,uniqueids=None):
    """Collect 'position' objects into *items*, excluding positions from an
    account that also has a later statement date (DTASOF) within the range.

    NOTE(review): list(dtend)[0] raises TypeError when dtend is None -- e.g.
    asset() calls getpositions(items,None,None,None,uniqueids); confirm whether
    that code path is ever exercised.
    """
    # Make sure we don't get entries from the same account (ACCTID) from a later date
    # than the maximal date that falls in the range
    dtend1=list(dtend)[0]
    # make sure dtend refers to the last moment of the day
    if len(dtend1)==8 or len(dtend1)==6:
        dtend1+='235959'
    d,t=svalue('DTASOF',dtend1)
    # SPARQL extension: optionally bind ?dt2 to any later statement date of the
    # same account inside the range, then keep rows only when no such later
    # date exists (!bound(?dt2)).
    ext=('optional {'+
         '?y2 ofx:DTASOF ?dt2 . '+
         '?y2 ofx:ACCTID ?ACCTID . '+
         'filter( ?dt2 > ?DTASOF ).'+
         'filter( ?dt2 <= "%s"%s )'%(d,t)+
         '}. '+
         'filter(!bound(?dt2)).\n')
    getclass(items,'position',acctids,dtstart,dtend,uniqueids,fltrextension=ext)
def gettransactions(items,acctids,dtstart,dtend,uniqueids=None):
    """Collect both investment 'transaction' and bank 'checking' lines into
    *items*; UNIQUEID filtering applies to investment transactions only."""
    for typ,uids in (('transaction',uniqueids),('checking',None)):
        getclass(items,typ,acctids,dtstart,dtend,uids)
def getchecking(items,acctids,dtstart,dtend,uniqueids=None):
    """Collect bank 'checking' lines into *items*.

    *uniqueids* is accepted for interface symmetry with the other getters but
    is deliberately not forwarded (checking lines carry no UNIQUEID).
    """
    getclass(items,'checking',acctids,dtstart,dtend,None)
| [
[
1,
0,
0.0075,
0.0075,
0,
0.66,
0,
715,
0,
1,
0,
0,
715,
0,
0
],
[
1,
0,
0.0149,
0.0075,
0,
0.66,
0.0909,
686,
0,
1,
0,
0,
686,
0,
0
],
[
1,
0,
0.0224,
0.0075,
0,
... | [
"import logging",
"from web.webbase import *",
"from classes import classes",
"from imports.n3tagvalue import svalue",
"def gettag(t):\n return t if t[0].isalpha() else t[1:]",
" return t if t[0].isalpha() else t[1:]",
"def getprop(t):\n t=gettag(t)\n ns = 'ofx' if t.isupper() else 'a3'\n ... |
import os
from rdflib import Namespace, BNode, Literal, RDF, URIRef
from semprog.pysesame import connection
import cherrypy
from mako.lookup import TemplateLookup
import urllib
from simplejson import dumps
from classes import classes
# Pick the CherryPy config file: prefer ~/cherrypy.cfg, then ./cherrypy.cfg,
# and fall back to a hard-coded port when neither exists.
user_cfg = os.path.expanduser("~/cherrypy.cfg")
if os.path.exists(user_cfg):
    cherrypy_cfg = user_cfg
elif os.path.exists("cherrypy.cfg"):
    cherrypy_cfg = "cherrypy.cfg"
else:
    cherrypy_cfg = None
if cherrypy_cfg:
    cherrypy.config.update(cherrypy_cfg)
else:
    cherrypy.config.update({'server.socket_port':8000})
# Base URL of the Sesame triple store (overridable via 'sesame.url').
url=cherrypy.config.setdefault('sesame.url','http://localhost:8080')
url+='/openrdf-sesame/'
# Mako template lookup shared by all page handlers in this package.
lookup = TemplateLookup(directories=['templates'],
        output_encoding='utf-8',encoding_errors='replace',
        input_encoding='utf-8')
# Module-wide Sesame connection used by every SPARQL query.
con=connection(url)
repository=cherrypy.config.setdefault('sesame.repository','3account')
con.use_repository(repository)
# RDF namespaces: 'ofx' for OFX tag properties, 'a3' for 3account's own schema.
OFX = Namespace("http://www.w3.org/2000/10/swap/pim/ofx#")
OFXH = Namespace("http://www.w3.org/2000/10/swap/pim/ofx-headers#")
XSD = Namespace("http://www.w3.org/2001/XMLSchema#")
A3 = Namespace("http://code.google.com/p/3account/wiki/Schema#")
con.addnamespace('ofx',OFX)
con.addnamespace('ofxh',OFXH)
con.addnamespace('xsd',XSD)
con.addnamespace('a3',A3)
def lst(l):
    """Return *l* unchanged if it is already a list, otherwise wrap it in one."""
    return l if isinstance(l,list) else [l]
def row2class(items,row,typ):
    """Shorthand for row2dict using the key/mandatory/optional tag lists of
    class *typ* from the classes module."""
    c=classes[typ]
    row2dict(items,row,c['key'],c['key']+c['mandatory']+c['optional'],typ)
def row2dict(objDict,row,idtag,tags,typ=None):
    """ Build objects out of flat-lines!
    row is a dict with tag:value pairs.
    The value in row is either a list of literals (str,float) or a dict of the form {'value':value,'datatype':'xsd:...'}.
    The function searches for an object in row and if found adds it to objDict.
    The key to the object in objDict is a str of a single value or str of a list of multiple values.
    In this way an object with a simple key is easy to access.
    The value(s) for the key are taken from the values assigned in row to the tag(s) defined by idtag.
    If not all idtag(s) are found in row then the object is not "in" the row.
    An object is a dict stored as a value in objDict.
    The object may have attributes which are tag:[value,...] pairs where value (str,float) are taken from the row
    and the tags are taken from the tags list.
    objDict={...
    str(row[idtag[0]]):{'label':key, 'type':typ, tags[0]:[..., row[tags[0]], ...}
    or
    str([row[idtag[0]], row[idtag[1]],...]):{'label':key, 'type':typ, tags[0]:[..., row[tags[0]], ...}
    ...}
    Only unique values are stored, and if the tag in tags is prefixed with '+' the value (float) is added
    """
    idtag = lst(idtag)
    # All key tags must be present, otherwise the object is not in this row.
    if any(t not in row for t in idtag): return
    # nrow: normalised row -- every cell becomes a list of raw values.
    nrow =dict((t,[v['value']] if isinstance(v,dict) else v) for t,v in row.iteritems())
    key=str(nrow[idtag[0]][0] if len(idtag)==1
            else [nrow[t][0] for t in idtag])
    if key not in objDict: objDict[key] = {'label':key}
    if typ: objDict[key]['type']=typ
    for tag in tags:
        # '+' prefix means: sum values as floats instead of collecting uniques.
        add=False
        if tag.startswith('+'):
            tag=tag[1:]
            add=True
        if tag not in row: continue
        for v in nrow[tag]:
            # Cells typed xsd:decimal are converted to float before use.
            if (isinstance(row[tag],dict) and
                row[tag].get('datatype','') == str(XSD.term('decimal'))):
                v = float(v)
            if tag not in objDict[key]:
                objDict[key][tag]=[0.] if add else []
            if add:
                objDict[key][tag][0]+=float(v)
            elif v not in objDict[key][tag]:
                objDict[key][tag].append(v)
from htmlentitydefs import codepoint2name
def htmlentities(u):
    """Escape every character of *u* that has a named HTML entity;
    spaces become non-breaking so table columns keep their width.

    NOTE(review): the space branch appends the literal ' ' -- this looks like
    a mangled '&nbsp;' (or a literal U+00A0) from the original source; confirm
    against the repository before relying on this text.
    """
    result = []
    for c in u:
        if ord(c) in codepoint2name:
            result.append('&%s;' % codepoint2name[ord(c)])
        elif c==' ':
            result.append(' ')
        else:
            result.append(c)
    return ''.join(result)
def printCleanup(trans):
    """ cleanup for printing """
    # Accept a single object or a list of objects.
    if not isinstance(trans,list): trans=[trans]
    for tran in trans:
        for tag in tran.iterkeys():
            # Strip any URI prefix up to '#' from string values.
            tran[tag] = [v[v.rfind("#")+1:] if isinstance(v,basestring) else v for v in tran[tag]]
            # Format floats with two decimals and HTML-escape everything.
            tran[tag] = [htmlentities("%.2f"%v if isinstance(v,float) else v) for v in tran[tag]]
| [
[
1,
0,
0.0088,
0.0088,
0,
0.66,
0,
688,
0,
1,
0,
0,
688,
0,
0
],
[
1,
0,
0.0175,
0.0088,
0,
0.66,
0.0345,
926,
0,
5,
0,
0,
926,
0,
0
],
[
1,
0,
0.0263,
0.0088,
0,
... | [
"import os",
"from rdflib import Namespace, BNode, Literal, RDF, URIRef",
"from semprog.pysesame import connection",
"import cherrypy",
"from mako.lookup import TemplateLookup",
"import urllib",
"from simplejson import dumps",
"from classes import classes",
"user_cfg = os.path.expanduser(\"~/cherryp... |
from web.webbase import *
from get import getclass,getpositions,getassets
def accounts():
    """Return every 'account' object known to the store (no filtering)."""
    collected={}
    getclass(collected,'account',None,None,None,None)
    return collected.values()
def accounttotals(acctids,dtstart,dtend):
    """Return per-account statement totals ('accountdtasof' objects) plus
    per-accountGroup daily totals ('groupdayasof'), built from positions,
    assets and statement lines in the given date range."""
    # collect information on positions
    positions={}
    getpositions(positions,acctids,dtstart,dtend)
    # add information on assets
    assets={}
    uniqueids=set(o['UNIQUEID'][0] for o in positions.itervalues()) # get all uniqueids in use
    getassets(assets,uniqueids)
    # collect information on accounts
    accounts={}
    # get general information on accounts which is NOT time dependent
    getclass(accounts,'account',acctids,None,None,None)
    # collect information on statements
    getclass(accounts,'accountdtasof',acctids,dtstart,dtend,None)
    # for each account and DTASOF, sum up MKTVAL of all positions.
    for o in positions.itervalues():
        if o['type']!='position': continue
        if 'MKTVAL' not in o: continue
        row={'ACCTID':o['ACCTID'],
             'DTASOF':o['DTASOF'],
             'mktval':o['MKTVAL']}
        # count money-market positions that are marked as CASH
        if 'CASH' in assets.get(o['UNIQUEID'][0],{}).get('assetClass',[]):
            row['moneymrkt']=o['MKTVAL']
        row2class(accounts,row,"accountdtasof")
    # Add cash to total value
    for o in accounts.itervalues():
        if o['type']!='accountdtasof': continue
        # Fall back to the CASH-marked money-market total when the statement
        # did not report AVAILCASH explicitly.
        if 'AVAILCASH' not in o:
            o['AVAILCASH']=o.get('moneymrkt',[0.])
        if 'mktval' in o:
            o['mktval'][0]+=o['AVAILCASH'][0]-o.get('moneymrkt',[0.])[0]
        else:
            o['mktval']=o['AVAILCASH']
        o['mktval'][0]+=o.get('BALAMT',[0.])[0]
    # Roll statement totals up into per-group, per-day totals.
    accountdtasofs = [o for o in accounts.itervalues()
                      if o['type']=='accountdtasof']
    for o in accountdtasofs:
        ACCTID=o['ACCTID'][0]
        accountGroups=accounts[ACCTID].get('accountGroup',[])
        for accountGroup in accountGroups:
            row2class(accounts,{'accountGroup':[accountGroup],
                                'dayasof':[o['DTASOF'][0][:10]],
                                'mKTVAL':o['mktval']},'groupdayasof')
    return accounts.values()
| [
[
1,
0,
0.0179,
0.0179,
0,
0.66,
0,
686,
0,
1,
0,
0,
686,
0,
0
],
[
1,
0,
0.0357,
0.0179,
0,
0.66,
0.3333,
607,
0,
3,
0,
0,
607,
0,
0
],
[
2,
0,
0.0982,
0.0714,
0,
... | [
"from web.webbase import *",
"from get import getclass,getpositions,getassets",
"def accounts():\n items={}\n getclass(items,'account',None,None,None,None)\n return items.values()",
" items={}",
" getclass(items,'account',None,None,None,None)",
" return items.values()",
"def accounttot... |
from get import *
def asset(uniqueids):
    """Gather positions, transactions and asset details for *uniqueids*
    into one flat list of objects."""
    collected={}
    getpositions(collected,None,None,None,uniqueids)
    gettransactions(collected,None,None,None,uniqueids)
    getassets(collected,uniqueids)
    return collected.values()
[
1,
0,
0.125,
0.125,
0,
0.66,
0,
607,
0,
1,
0,
0,
607,
0,
0
],
[
2,
0,
0.6875,
0.75,
0,
0.66,
1,
969,
0,
1,
1,
0,
0,
0,
4
],
[
14,
1,
0.5,
0.125,
1,
0.13,
0,
... | [
"from get import *",
"def asset(uniqueids):\n items={}\n getpositions(items,None,None,None,uniqueids)\n gettransactions(items,None,None,None,uniqueids)\n getassets(items,uniqueids)\n return items.values()",
" items={}",
" getpositions(items,None,None,None,uniqueids)",
" gettransactio... |
"""A dictonary of all the classes of objects that can be found in flat lines.
the keys are the class types (names). The values describe each class:
* The key list describes all the tags that must appear in a flat line
in order for an object of the class to be said to be in the line.
The object is uniquely identified by the values these tags receive
in the flat line. The keys are also properties of the object.
* In a key each tag receives one value (as appose to other properties that can
have multiple values.) If multiple key values appear in a single flat line
the different objects of the same class that are identified by the different
key value should be smushed togther (see pending patent US20080215619)
* The mandatory and optional lists specify additional properties of the object.
The values assigned to the properties are accumulated from all the lines
in which an object appears. Its possible for multiple values to be defined
in the same flat line. By default, only uniqiue values are taken, but if
the + sign is prefixed to a property tag, all the accumulated values are
summed togther.
* In addition properties in the mandatory list must appear in every flat line
in which the object is said to be in. However, these properties are not used
as part of the object's key. The mandatory list acts as a filter on the lines
in which an object can appear.
"""
classes={
'account':{
'key':['ACCTID'],
'mandatory':[],
'optional':['accountGroup','CURDEF','aCCTTYPE','ACCTTYPE']},
'asset':{
'key':['UNIQUEID'],
'mandatory':[],
'optional':['SECNAME','TICKER','assetClass',
'FIASSETCLASS','ASSETCLASS','SECTYPE',
'STOCKTYPE', 'OPTTYPE', 'DEBTTYPE', 'MFTYPE', 'DTMAT']},
'assetpercent':{
'key':['UNIQUEID','ASSETCLASS'],
'mandatory':['PERCENT'],
'optional':[]},
'position':{
'key':['ACCTID','DTASOF','UNIQUEID'],
'mandatory':['UNITS'],
'optional':['UNITPRICE','MKTVAL','DTPRICEASOF']},
'transaction':{
'key':['FITID', 'DTTRADE'],
'mandatory':['ACCTID'],
'optional':['DTSETTLE', 'UNIQUEID','SECNAME',
'tran', 'TRNTYPE',
'units', 'UNITPRICE','CURRENCY',
'TOTAL', 'FEES', 'TAXES',
'mEMO']
},
'checking':{
'key':['FITID','DTPOSTED'],
'mandatory':['ACCTID'],
'optional':['NAME','PAYEEID','TRNTYPE',
'TRNAMT', 'mEMO']
},
# the following are "statement" objects
# the tags mKTVAL, mktval and moneymrkt can be computed by web server
'groupdayasof':{
'key':['accountGroup','dayasof'],
'mandatory':[],
'optional':['+mKTVAL']},
'accountdtasof':{
'key':['ACCTID','DTASOF'],
'mandatory':[],
'optional':['AVAILCASH','BALAMT','bALAMT','+mktval','+moneymrkt']},
}
| [
[
8,
0,
0.1667,
0.3182,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
14,
0,
0.6667,
0.6818,
0,
0.66,
1,
124,
0,
0,
0,
0,
0,
6,
0
]
] | [
"\"\"\"A dictonary of all the classes of objects that can be found in flat lines.\nthe keys are the class types (names). The values describe each class:\n * The key list describes all the tags that must appear in a flat line\n in order for an object of the class to be said to be in the line.\n The object is uni... |
from get import *
def transactions(acctids,dtstart,dtend):
    """Return transaction objects in the date range, enriched with details
    of every asset they reference."""
    collected={}
    gettransactions(collected,acctids,dtstart,dtend)
    # Gather the unique ids of all securities referenced by a transaction.
    referenced=set()
    for obj in collected.itervalues():
        if 'UNIQUEID' in obj:
            referenced.add(obj['UNIQUEID'][0])
    getassets(collected,referenced)
    return collected.values()
| [
[
1,
0,
0.0909,
0.0909,
0,
0.66,
0,
607,
0,
1,
0,
0,
607,
0,
0
],
[
2,
0,
0.6364,
0.8182,
0,
0.66,
1,
587,
0,
3,
1,
0,
0,
0,
5
],
[
14,
1,
0.3636,
0.0909,
1,
0.95,
... | [
"from get import *",
"def transactions(acctids,dtstart,dtend):\n items={}\n gettransactions(items,acctids,dtstart,dtend)\n # add information on assets\n uniqueids=set(o['UNIQUEID'][0]\n for o in items.itervalues()\n if 'UNIQUEID' in o) # get all uniqueids in use\n ... |
from get import *
def positions(acctids,dtstart,dtend):
    """Return position objects in the date range, enriched with details of
    every referenced asset."""
    collected={}
    getpositions(collected,acctids,dtstart,dtend)
    # Every position carries a UNIQUEID; collect them all for asset lookup.
    referenced=set(obj['UNIQUEID'][0] for obj in collected.itervalues())
    getassets(collected,referenced)
    return collected.values()
def positionspercents(acctids,dtstart,dtend):
    """Return positions and break down positions according to percentage in each ASSETCLASS.
    Each position object has an additional field ASSETCLASS."""
    positions={}
    getpositions(positions,acctids,dtstart,dtend)
    # add information on assets
    uniqueids=set(o['UNIQUEID'][0] for o in positions.itervalues()) # get all uniqueids in use
    assets={}
    getassetspercents(assets,uniqueids)
    newitems=[]
    for key,p in positions.iteritems():
        uniqueid=p['UNIQUEID'][0]
        # NOTE(review): MKTVAL values appear to be strings elsewhere in this
        # codebase; 'mktval<1.' then relies on Python 2's str/float ordering --
        # confirm the intended type here.
        mktval = p['MKTVAL'][0]
        if mktval<1.: continue
        a = assets[uniqueid]
        if 'percent' in a:
            # Split the position into one pseudo-position per asset class,
            # sized by that class's percentage of the asset.
            undefinedmktval=mktval
            for assetclass,percent in a['percent'].iteritems():
                newp = p.copy()
                newp['label']=key+assetclass
                newp['ASSETCLASS']=[assetclass]
                newmktval=mktval*float(percent)/100.
                newp['MKTVAL']=[str(newmktval)]
                if newmktval>1.:
                    newitems.append(newp)
                undefinedmktval-=newmktval
            # Whatever the percentages did not cover stays on the original.
            if undefinedmktval>1.:
                p['MKTVAL'][0]=str(undefinedmktval)
                newitems.append(p)
        elif 'assetClass' in a:
            p['ASSETCLASS']=a['assetClass']
            newitems.append(p)
        else:
            newitems.append(p)
    return assets.values()+newitems
[
1,
0,
0.0213,
0.0213,
0,
0.66,
0,
607,
0,
1,
0,
0,
607,
0,
0
],
[
2,
0,
0.1277,
0.1489,
0,
0.66,
0.5,
320,
0,
3,
1,
0,
0,
0,
5
],
[
14,
1,
0.0851,
0.0213,
1,
0.19... | [
"from get import *",
"def positions(acctids,dtstart,dtend):\n items={}\n getpositions(items,acctids,dtstart,dtend)\n # add information on assets\n uniqueids=set(o['UNIQUEID'][0] for o in items.itervalues()) # get all uniqueids in use\n getassets(items,uniqueids)\n return items.values()",
" ... |
from get import *
def checking(acctids,dtstart,dtend):
    """Return bank 'checking' transaction objects within the date range."""
    result={}
    getchecking(result,acctids,dtstart,dtend)
    return result.values()
| [
[
1,
0,
0.1667,
0.1667,
0,
0.66,
0,
607,
0,
1,
0,
0,
607,
0,
0
],
[
2,
0,
0.75,
0.6667,
0,
0.66,
1,
732,
0,
3,
1,
0,
0,
0,
2
],
[
14,
1,
0.6667,
0.1667,
1,
0.23,
... | [
"from get import *",
"def checking(acctids,dtstart,dtend):\n items={}\n getchecking(items,acctids,dtstart,dtend)\n return items.values()",
" items={}",
" getchecking(items,acctids,dtstart,dtend)",
" return items.values()"
] |
__version__ = "$Id$"
import os
import threading
import random
import urllib2
import cherrypy
from mako.template import Template
from mako.lookup import TemplateLookup
from urllib import quote_plus
from web.webbase import *
from web.position import positions,positionspercents
from web.asset import asset
from web.account import accounts,accounttotals
from web.transaction import transactions
from web.checking import checking
from cherrypy.lib.static import serve_file
from optparse import OptionParser
import logging
#Crazy line needed for OS X 10.6
# (pre-installs the default urllib2 opener so later urlopen calls work there)
urllib2.install_opener(urllib2.build_opener())
# Absolute directory of this file; used by main() to locate static assets.
current_dir = os.path.dirname(os.path.abspath(__file__))
class Main(object):
    """CherryPy root object: each exposed method renders a page whose Mako
    template later pulls its data as JSON via fetch_json."""
    def __init__(self):
        self.lock=threading.Lock()     # guards msgs and lastFetch
        self.msgs={}                   # msgkey -> msg dict (cached with 'items')
        self.lastFetch=None            # msgkey of the most recent fetch_json
    def jmsg(self,func,**msg):
        """msgs communicate url parameters between the method that fetches the web page
        and fetch_json that generates the content for that web page:
        the url parameters are kept in a dictionary (msg) that also holds the key 'func' which gives
        a pointer to a function that accepts msg as single parameter and returns the JSON response.
        each msg is held with a random key which is also given to the web page.
        the web page using its Mako template pulls the JSON information using fetch_json?msgkey=<msgkey>"""
        msg['func']=func
        # instead of using a random msgkey str(random.getrandbits(30))
        # use a deterministic hash of msg which is assumed to have values that are either str or [str]
        msgkey=()
        keys=msg.keys()
        keys.sort()
        for key in keys:
            value = msg[key]
            if isinstance(value,list):
                value.sort()
                value=tuple(value)
            msgkey+=(key,value)
        msgkey=str(abs(hash(msgkey)))
        self.lock.acquire()
        if msgkey not in self.msgs:
            self.msgs[msgkey]=msg
        self.lock.release()
        return msgkey
    @cherrypy.expose
    def fetch_json(self,jmsg):
        """Return the JSON payload for a previously registered msgkey;
        'last' repeats the most recent fetch. Results are cached in the msg.

        NOTE(review): the lock is not released if the handler raises between
        acquire() and release() (e.g. the assert below, or func(**args)
        failing) -- a try/finally would make this exception-safe; confirm.
        """
        self.lock.acquire()
        if jmsg=='last':
            jmsg=self.lastFetch
        assert(jmsg in self.msgs)
        msg=self.msgs[jmsg]
        #del self.msgs[jmsg] # each msg can be used once
        if 'items' in msg:
            items=msg['items']
        else:
            func=msg['func']
            args=msg.copy()
            del args['func']
            # Normalise every URL parameter into a set of strings.
            for k,v in args.iteritems():
                if isinstance(v,basestring):
                    args[k]=set([v]) if v else set()
                else:
                    args[k]=set(v)
            items = func(**args)
            msg['items']=items
        self.lastFetch=jmsg
        self.lock.release()
        return dumps({'items':items})
    @cherrypy.expose
    def index(self):
        # Landing page; links to the Sesame workbench for this repository.
        url=cherrypy.config.get('sesame.url')
        repository=cherrypy.config.get('sesame.repository')
        url+='/openrdf-workbench/repositories/'+repository
        t=lookup.get_template('index.html')
        return t.render(url=url,config=cherrypy.config)
    @cherrypy.expose
    def transactions(self,DTSTART,DTEND,ACCTID):
        t=lookup.get_template('transactions.html')
        return t.render(jmsg=self.jmsg(transactions,acctids=ACCTID,
                                       dtstart=DTSTART,dtend=DTEND),
                        config=cherrypy.config)
    @cherrypy.expose
    def checking(self,DTSTART,DTEND,ACCTID):
        t=lookup.get_template('checking.html')
        return t.render(jmsg=self.jmsg(checking,acctids=ACCTID,
                                       dtstart=DTSTART,dtend=DTEND),
                        config=cherrypy.config)
    @cherrypy.expose
    def positions(self,DTSTART,DTEND,ACCTID):
        t=lookup.get_template('positions.html')
        return t.render(jmsg=self.jmsg(positions,acctids=ACCTID,
                                       dtstart=DTSTART,dtend=DTEND),
                        config=cherrypy.config)
    @cherrypy.expose
    def positionspercents(self,DTSTART,DTEND,ACCTID):
        t=lookup.get_template('positionspercents.html')
        return t.render(jmsg=self.jmsg(positionspercents,acctids=ACCTID,
                                       dtstart=DTSTART,dtend=DTEND),
                        config=cherrypy.config)
    @cherrypy.expose
    def accounttotals(self,DTSTART,DTEND,ACCTID):
        t=lookup.get_template('accounttotals.html')
        return t.render(jmsg=self.jmsg(accounttotals,acctids=ACCTID,
                                       dtstart=DTSTART,dtend=DTEND),
                        config=cherrypy.config)
    @cherrypy.expose
    def accounts(self):
        t=lookup.get_template('accounts.html')
        return t.render(jmsg=self.jmsg(accounts),config=cherrypy.config)
    @cherrypy.expose
    def asset(self,UNIQUEID):
        t=lookup.get_template('asset.html')
        return t.render(jmsg=self.jmsg(asset,uniqueids=UNIQUEID),
                        config=cherrypy.config)
    @cherrypy.expose
    def item(self):
        # Renders a single item using whatever fetch_json returned last.
        t=lookup.get_template('item.html')
        return t.render(jmsg='last',config=cherrypy.config)
def main():
    """Parse command-line options, configure logging, and start the CherryPy
    server serving Main() at '/' with static css/js directories."""
    parser = OptionParser(usage="usage: %prog [options]",
                          description="Webserver front-end for 3account",
                          version=__version__)
    parser.add_option("-v", "--verbose",
                      action="store_true", dest="verbose")
    (options, args) = parser.parse_args()
    # -v raises both the app and CherryPy loggers to INFO.
    if options.verbose:
        logging.basicConfig(level=logging.INFO)
        cherrypy.log.error_log.setLevel(logging.INFO)
        cherrypy.log.access_log.setLevel(logging.INFO)
    else:
        logging.basicConfig(level=logging.WARNING)
        cherrypy.log.error_log.setLevel(logging.WARNING)
        cherrypy.log.access_log.setLevel(logging.WARNING)
    conf = {'/css': {'tools.staticdir.on': True,
                     'tools.staticdir.dir': os.path.join(current_dir, 'static','css')},
            '/js': {'tools.staticdir.on': True,
                    'tools.staticdir.dir': os.path.join(current_dir, 'static','js')},
            '/__history__.html': {'tools.staticfile.on': True, # for Back and Forward buttons in the browser works as undo and redo
                                  'tools.staticfile.filename':
                                  os.path.join(current_dir, 'static','__history__.html')},
            }
    cherrypy.quickstart(Main(),'/',config=conf)
if __name__ == '__main__':
    main()
| [
[
14,
0,
0.0063,
0.0063,
0,
0.66,
0,
162,
1,
0,
0,
0,
0,
3,
0
],
[
1,
0,
0.0127,
0.0063,
0,
0.66,
0.0455,
688,
0,
1,
0,
0,
688,
0,
0
],
[
1,
0,
0.019,
0.0063,
0,
0.... | [
"__version__ = \"$Id$\"",
"import os",
"import threading",
"import random",
"import urllib2",
"import cherrypy",
"from mako.template import Template",
"from mako.lookup import TemplateLookup",
"from urllib import quote_plus",
"from web.webbase import *",
"from web.position import positions,posit... |
# Load an RDF file into local sesame server
#import os
#import sys
#from optparse import OptionParser
#from httplib import BadStatusLine
from rdflib.graph import ConjunctiveGraph
from semprog import pysesame
__version__ = "$Id$"
def loadGraph2Server(g,url,fname=None):
    """Serialize graph *g* as RDF/XML and upload it to the Sesame server at
    *url*, under context *fname* in the '3account' repository."""
    payload = g.serialize(format='xml')
    conn = pysesame.connection(url)
    conn.use_repository('3account')
    conn.putdata(payload,context=fname)
def loadFile2Server(fname,url,format,context=None):
    """Parse an RDF file and upload it to the Sesame server at *url*.

    fname   -- a filesystem path or an already-open file object.
    format  -- rdflib parser format (e.g. 'xml', 'n3').
    context -- context name; defaults to the file's basename when *fname*
               is a path.
    """
    # BUG FIX: 'import os' at module top is commented out, so the original
    # os.path.basename call raised NameError; import locally instead.
    import os
    g = ConjunctiveGraph()
    opened = False
    if isinstance(fname,str):
        fp = open(fname)
        opened = True
        if not context:
            context = os.path.basename(fname)
    else:
        fp=fname
    try:
        g.parse(fp,format=format,publicID=context)
    finally:
        # Close the handle only when we opened it ourselves (leaked before).
        if opened:
            fp.close()
    loadGraph2Server(g,url,"<file://%s>"%context)
| [
[
1,
0,
0.24,
0.04,
0,
0.66,
0,
752,
0,
1,
0,
0,
752,
0,
0
],
[
1,
0,
0.28,
0.04,
0,
0.66,
0.25,
366,
0,
1,
0,
0,
366,
0,
0
],
[
14,
0,
0.32,
0.04,
0,
0.66,
0.5... | [
"from rdflib.graph import ConjunctiveGraph",
"from semprog import pysesame",
"__version__ = \"$Id$\"",
"def loadGraph2Server(g,url,fname=None):\n data = g.serialize(format='xml')\n c = pysesame.connection(url)\n c.use_repository('3account')\n c.putdata(data,context=fname)",
" data = g.seriali... |
########################################################################
# Copyright (C) 2007,8,9 Ehud Ben-Reuven
# udi@benreuven.com
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation version 2.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
########################################################################
"""Convert OFX v2.x or XML files to flat lines.
"""
help="""Convert OFX v2.x or XML files to flat lines.
"""
from ofx2xml import ofx2xml
import ofxml2flat
def flat(fin,fout,context=None,hashing=False):
	"""Delegate flattening of an OFX v2.x/XML stream *fin* into flat lines
	on *fout* to ofxml2flat.flat (the input is already XML, so no
	OFX-v1 preprocessing is needed)."""
	ofxml2flat.flat(fin,fout,context,hashing)
| [
[
8,
0,
0.6607,
0.0714,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
14,
0,
0.7321,
0.0714,
0,
0.66,
0.25,
868,
1,
0,
0,
0,
0,
3,
0
],
[
1,
0,
0.7857,
0.0357,
0,
0.66,
... | [
"\"\"\"Convert OFX v2.x or XML files to flat lines.\n\"\"\"",
"help=\"\"\"Convert OFX v2.x or XML files to flat lines.\n\"\"\"",
"from ofx2xml import ofx2xml",
"import ofxml2flat",
"def flat(fin,fout,context=None,hashing=False):\n\tofxml2flat.flat(fin,fout,context,hashing)",
"\tofxml2flat.flat(fin,fout,co... |
########################################################################
# Copyright (C) 2007,8,9 Ehud Ben-Reuven
# udi@benreuven.com
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation version 2.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
########################################################################
"""Convert OFX v1.x or QFX files to flat lines.
This is done in two stages.
The first converts the OFX to N3 file that keeps the structure of the original
OFX file.
The second stage converts the N3 file to a graph and then unstructure it.
"""
help="""Convert OFX v1.x file (MS-Money or Quicken Web Connect) to
flat OFX-RDF (in memory) and load to Sesame server
"""
import logging
import ofx2n3
from rdflib.graph import *
from rdflib.namespace import *
from rdflib.term import *
# RDF namespaces: 'ofx' for OFX tag properties, 'a3' for 3account's own schema.
OFX = Namespace("http://www.w3.org/2000/10/swap/pim/ofx#")
A3 = Namespace("http://code.google.com/p/3account/wiki/Schema#")
XSD = Namespace("http://www.w3.org/2001/XMLSchema#")
FS = Namespace("file://")
# Predicate linking a source-file context node to each flattened line.
flatLine = A3["flatLine"]
# Namespaces recognised by cleanval() when stripping URI prefixes.
NS={'ofx':OFX, 'a3':A3}
def getSimplifiedName(uri):
    """Return the fragment after the last '#' in *uri*; failing that, the
    part after the last '/'; the whole string when neither occurs."""
    separator = "#" if "#" in uri else "/"
    return uri.rsplit(separator, 1)[-1]
def pre(fname,fout,hashing=False):
    """First conversion stage: turn the OFX v1.x file *fname* into structured
    N3 on *fout* via ofx2n3.flat."""
    ofx2n3.flat(fname,fout,hashing=hashing)
def cleanval(v):
    """Turn a URIRef value into a plain Literal, stripping the 'ofx'/'a3'
    namespace prefix when one matches; non-URIRef values pass through."""
    if not isinstance(v,URIRef):
        return v
    v=str(v)
    for ns in NS.itervalues():
        if v.startswith(str(ns)):
            return Literal(v[len(str(ns)):])
    # URI from an unknown namespace: keep the full string as a Literal.
    return Literal(v)
def cleanvar(l):
    """Strip any namespace/nesting prefix (everything up to the last ':')
    and a single leading non-letter marker such as '+' or '?' from tag *l*."""
    if ":" in l:
        l = l[l.rfind(":")+1:]
    if not l[0].isalpha():
        l = l[1:]
    return l
def vars(lst):
    """Render the tag strings in *lst* as space-separated SPARQL variables
    ('?NAME'). NOTE: this deliberately shadows the builtin vars() here."""
    names=[]
    for tag in lst:
        assert isinstance(tag,str)
        names.append("?"+cleanvar(tag))
    return " ".join(names)
def pairs(x,lst):
    """Render tag strings in *lst* as SPARQL triple patterns on subject ?x.

    Mini-syntax per tag:
      'TAG'        -> '?x ofx:TAG ?TAG .'
      '?TAG'       -> same pattern wrapped in 'optional { ... }'
      'A:B'        -> nested blank node: '?x ofx:A [ ofx:B ?B ]' (any depth)
      '+TAG'       -> appended with ';' to the PREVIOUS pattern's subject
    The exact spacing matters to downstream string comparison -- keep as-is.
    """
    res=[]
    for l in lst:
        assert isinstance(l,str)
        if l[0]=="+":
            # Continuation: extend the previous pattern (res[-2] is its head).
            l=l[1:]
            res[-2]+=" ; ofx:"+l.upper()+" ?"+l
        else:
            if l[0]=="?":
                sstr=" optional { ?"+x+" "
                estr=" } .\n"
                l=l[1:]
            else:
                sstr="?"+x+" "
                estr=" .\n"
            # Each 'a:' level opens a blank node '[' that estr later closes.
            while ":" in l:
                sstr+="ofx:"+l[:l.find(":")]+" [ "
                estr=" ]"+estr
                l=l[l.find(":")+1:]
            sstr+="ofx:"+l.upper()+" ?"+l
            res.append(sstr)
            res.append(estr)
    return " ".join(res)
def bprt(f,bNode,lst,vals):
    """Add one triple per non-empty value in *vals* to graph *f*, attached to
    *bNode*; tag names come from *lst* (ALL-CAPS tags use the OFX namespace,
    mixed-case tags the A3 namespace)."""
    for i,t in enumerate(lst):
        v=vals[i]
        if v:
            t=cleanvar(t)
            v=cleanval(v)
            if t.isupper():
                t=OFX[t]
            else:
                t=A3[t]
            f.add((bNode,t,v))
def flatseclist(g,f,context):
    """Flatten the OFX SECLIST of graph *g* into flat-line blank nodes on
    graph *f*, one line per security, linked to *context* via a3:flatLine.
    Each OFX security subtype (DEBT/MF/OPT/OTHER/STOCK) is queried separately
    with its own tag list."""
    # Tags common to every SECINFO: mandatory (secinfoT) and optional (secinfoO).
    secinfoT=["SECID:UNIQUEID","+UNIQUEIDTYPE","SECNAME",]
    secinfoO=["?TICKER","?FIID","?RATING","?UNITPRICE","?dtasof",
              "?CURRENCY:CURRATE","+CURSYM","?MEMO"]
    # Per-subtype tag lists keyed by the OFX *INFO element prefix.
    infoTs = [("DEBT",["PARVALUE",
                       "DEBTTYPE",
                       "?DEBTCLASS",
                       "?COUPONRT",
                       "?DTCOUPON","?COUPONFREQ",
                       "?CALLPRICE","?YIELDTOCALL","?DTCALL","?CALLTYPE",
                       "?YIELDTOMAT","?DTMAT","?ASSETCLASS","?FIASSETCLASS"
                       ]),
              ("MF",["?MFTYPE","?YIELD","?DTYIELDASOF",
                     "?MFASSETCLASS:PORTION:ASSETCLASS","+PERCENT",
                     "?FIMFASSETCLASS:FIPORTION:FIASSETCLASS","+PERCENT"]),
              ("OPT",["OPTTYPE","STRIKEPRICE","DTEXPIRE","SHPERCTRCT",
                      "?secid","?ASSETCLASS","?FIASSETCLASS"]),
              ("OTHER",["?TYPEDESC","?ASSETCLASS","?FIASSETCLASS"]),
              ("STOCK",["?STOCKTYPE","?YIELD","?DTYIELDASOF","?ASSETCLASS",
                        "?FIASSETCLASS"])]
    for sectype,infoT in infoTs:
        secinfos=g.query("select "+vars(secinfoT+secinfoO+infoT)+" {\n"+
                         "[ ofx:SECLIST [ ?SECTYPE ?x ] ] .\n"+
                         "?x ofx:SECINFO ?y .\n"+
                         pairs("y",secinfoT)+
                         pairs("x",infoT)+
                         pairs("y",secinfoO)+ # crazy bug in librdf 2.4.2
                         "}",
                         initNs=NS,initBindings={"SECTYPE":OFX[sectype+"INFO"]})
        for secinfo in secinfos:
            # One flat line per matching security.
            bNode=BNode()
            f.add((context,flatLine,bNode))
            f.add((bNode,
                   OFX["SECTYPE"],
                   Literal(getSimplifiedName(sectype))))
            bprt(f,bNode,secinfoT+secinfoO+infoT,secinfo)
def flatinv(g,f,acctid,context):
invstmtrT=["INVACCTFROM:ACCTID","+BROKERID","CURDEF","DTASOF"]
invstmtrs=g.query("select "+vars(invstmtrT)+"{"+pairs("x",invstmtrT)+"}",
initNs=NS,initBindings={"ACCTID":acctid})
assert len(invstmtrs)==1
for invstmtr in invstmtrs:
bNode=BNode()
f.add((context,flatLine,bNode))
bprt(f,bNode,invstmtrT,invstmtr)
f.add((bNode,A3["aCCTTYPE"],Literal("INVESTMENT")))
#INVPOSLIST
invposT=["SECID:UNIQUEID","+UNIQUEIDTYPE",
"HELDINACCT","POSTYPE","UNITS","UNITPRICE","MKTVAL","DTPRICEASOF",
"?CURRENCY:CURRATE","+CURSYM","?memo"]
invposs=g.query("select ?pos "+vars(invposT)+"{"+
pairs("x",["INVACCTFROM:ACCTID"])+
"?x ofx:INVPOSLIST [ ?pos [ ofx:INVPOS ?y ] ] . "+
pairs("y",invposT)+"}",
initNs=NS,initBindings={"ACCTID":acctid})
for invpos in invposs:
bNode=BNode()
f.add((context,flatLine,bNode))
bprt(f,bNode,invstmtrT,invstmtr)
bprt(f,bNode,["pos"]+invposT,invpos)
#INVBAL
invbalT=["INVBAL:AVAILCASH","+MARGINBALANCE","+SHORTBALANCE"]
invbals=g.query("select "+vars(invbalT)+"{"+
pairs("x",["INVACCTFROM:ACCTID"])+
pairs("x",invbalT)+"}",
initNs=NS,initBindings={"ACCTID":acctid})
for invbal in invbals:
bNode=BNode()
f.add((context,flatLine,bNode))
bprt(f,bNode,invstmtrT,invstmtr)
bprt(f,bNode,invbalT,invbal)
#INVTRANLIST
invtranlistT=["DTSTART","DTEND"]
invtranlists=g.query("select "+vars(invtranlistT)+"{"+
pairs("x",["INVACCTFROM:ACCTID"])+
"?x ofx:INVTRANLIST ?y . "+
pairs("y",invtranlistT)+"}",
initNs=NS,initBindings={"ACCTID":acctid})
for invtranlist in invtranlists: # At most one INVTRANLIST per account
bNode=BNode()
f.add((context,flatLine,bNode))
bprt(f,bNode,invstmtrT,invstmtr)
bprt(f,bNode,invtranlistT,invtranlist)
#INVTRANLIST:INVBUY/SELL
invtranT=["INVTRAN:FITID","+DTTRADE","SECID:UNIQUEID","+UNIQUEIDTYPE"]
invtranTopt=[
"?INVTRAN:SRVRTID","?INVTRAN:DTSETTLE","?INVTRAN:REVERSALFITID",
"?INVTRAN:mEMO",];
invbuysellT=invtranT+[
"units","UNITPRICE",
"TOTAL",
"SUBACCTSEC","SUBACCTFUND",
]
invbuysellTopt=invtranTopt+[
"?COMMISSION","?TAXES","?FEES","?LOAD",
"?CURRENCY:CURRATE","+CURSYM",
"?ORIGCURRENCY:currate","+cursym",
"?INV401KSOURCE","?LOANID",
"?WITHHOLDING","?TAXEXEMPT","?GAIN",# INVSELL
"?STATEWITHHOLDING","?PENALTY","?MARKDOWN",
"?MARKUP", # INVBUY
"?LOANPRINCIPAL","?LOANINTEREST",
"?DTPAYROLL","?PRIORYEARCONTRIB",
]
invtranOpTopt=["?ACCRDINT","?BUYTYPE","?RELFITID","?OPTBUYTYPE","?SHPERCTRCT",
"?SELLREASON","?SELLTYPE","?AVGCOSTBASIS","?OPTSELLTYPE",
"?RELTYPE","?SECURED",]
invbuysells=g.query("select ?tran ?tranOp "+
vars(invbuysellT)+vars(invbuysellTopt)+
vars(invtranOpTopt)+"{"+
pairs("x",["INVACCTFROM:ACCTID"])+
"?x ofx:INVTRANLIST [ ?tran ?z ] ."+
"?z ?tranOp ?y ."+
pairs("y",invbuysellT)+
pairs("y",invbuysellTopt)+
pairs("z",invtranOpTopt)+
"}",
initNs=NS,initBindings={"ACCTID":acctid})
for invbuysell in invbuysells:
bNode=BNode()
f.add((context,flatLine,bNode))
bprt(f,bNode,invstmtrT,invstmtr)
bprt(f,bNode,["tran","tranOp"]+invbuysellT+invbuysellTopt+invtranOpTopt,
invbuysell)
#INVTRANLIST:INCOME
incomeT=invtranT
incomeTopt=invtranTopt+["?OPTACTION","?units","?SHPERCTRCT","?SUBACCTSEC",
"?RELFITID","?GAIN","?INCOMETYPE","?TOTAL",
"?SUBACCTFUND","?TAXEXEMPT","?WITHHOLDING",
"?CURRENCY:CURRATE","+CURSYM",
"?ORIGCURRENCY:CURRATE","+CURSYM",
"?SUBACCTTO","?SUBACCTFROM",
"?UNITPRICE","?COMMISSION","?TAXES","?FEES","?LOAD",
"?OLDUNITS","?NEWUNITS","?NUMERATOR","?DENOMINATOR",
"?FRACCASH","?TFERACTION","?POSTYPE",
"?INVACCTFROM:acctid","+brokerid", # lower case not to coflict with the transaction's ACCTID,BROKERID
"?AVGCOSTBASIS","?UNITPRICE","?DTPURCHASE"
]
incomes=g.query("select ?tran "+
vars(incomeT)+vars(incomeTopt)+"{"+
pairs("x",["INVACCTFROM:ACCTID"])+
"?x ofx:INVTRANLIST [ ?tran ?y ] ."+
pairs("y",incomeT)+
pairs("y",incomeTopt)+
"}",
initNs=NS,initBindings={"ACCTID":acctid})
for income in incomes:
bNode=BNode()
f.add((context,flatLine,bNode))
bprt(f,bNode,invstmtrT,invstmtr)
bprt(f,bNode,["tran"]+incomeT+incomeTopt,income)
#INVTRANLIST:INVBANKTRAN
invbanktranT=["SUBACCTFUND"]
stmttrnT=["TRNTYPE","DTPOSTED","TRNAMT","FITID",
"?DTUSER","?DTAVAIL",
"?CORRECTFITID","?CORRECTACTION","?SRVRTID",
"?CHECKNUM","?REFNUM","?SIC","?PAYEEID","?NAME",
"?PAYEE:NAME","+ADDR1","+ADDR2","+ADDR3","+CITY","+STATE","+POSTALCODE","+COUNTRY","+PHONE",
"?BANKACCTTO:bankid","+acctid","+accttype",
"?CCACCTTO:acctid",
"?mEMO","?INV401KSOURCE",
"?CURRENCY:currate","+cursym",
"?ORIGCURRENCY:currate","+cursym",
]
invbanktrans=g.query("select "+vars(invbanktranT)+vars(stmttrnT)+"{"+
pairs("x",["INVACCTFROM:ACCTID"])+
"?x ofx:INVTRANLIST [ ofx:INVBANKTRAN ?y ] ."+
"?y ofx:STMTTRN ?z ."+
pairs("y",invbanktranT)+pairs("z",stmttrnT)+
"}",
initNs=NS,initBindings={"ACCTID":acctid})
for invbanktran in invbanktrans:
bNode=BNode()
f.add((context,flatLine,bNode))
bprt(f,bNode,invstmtrT,invstmtr)
bprt(f,bNode,invbanktranT+stmttrnT,invbanktran)
def flatbank(g,f,acctid,context,cc=False):
if cc:
aCCTTYPE="CC"
invstmtrT=["CCACCTFROM:ACCTID","CURDEF","LEDGERBAL:DTASOF"]
else:
aCCTTYPE="BANK"
invstmtrT=["BANKACCTFROM:ACCTID","+BANKID","+ACCTTYPE","CURDEF",
"LEDGERBAL:DTASOF"]
invstmtrs=g.query("select "+vars(invstmtrT)+"{"+pairs("x",invstmtrT)+"}",
initNs=NS,initBindings={"ACCTID":acctid})
assert len(invstmtrs)==1
for invstmtr in invstmtrs:
bNode=BNode()
f.add((context,flatLine,bNode))
bprt(f,bNode,invstmtrT,invstmtr)
f.add((bNode,A3["aCCTTYPE"],Literal(aCCTTYPE)))
#BAL
balT=["LEDGERBAL:BALAMT","+dtASOF","?AVAILBAL:bALAMT","+dTASOF"]
bals=g.query("select "+vars(balT)+"{"+
pairs("x",[aCCTTYPE+"ACCTFROM:ACCTID"])+
pairs("x",balT)+"}",
initNs=NS,initBindings={"ACCTID":acctid})
for bal in bals:
bNode=BNode()
f.add((context,flatLine,bNode))
bprt(f,bNode,invstmtrT,invstmtr)
bprt(f,bNode,balT,bal)
#BANKTRANLIST
invtranlistT=["DTSTART","DTEND"]
invtranlists=g.query("select "+vars(invtranlistT)+"{"+
pairs("x",[aCCTTYPE+"ACCTFROM:ACCTID"])+
"?x ofx:BANKTRANLIST ?y . "+
pairs("y",invtranlistT)+"}",
initNs=NS,initBindings={"ACCTID":acctid})
for invtranlist in invtranlists: # At most one INVTRANLIST per account
bNode=BNode()
f.add((context,flatLine,bNode))
bprt(f,bNode,invstmtrT,invstmtr)
bprt(f,bNode,invtranlistT,invtranlist)
#STMTTRN
stmttrnT=["TRNTYPE","DTPOSTED","TRNAMT","FITID",
"?DTUSER","?DTAVAIL",
"?CORRECTFITID","?CORRECTACTION","?SRVRTID",
"?CHECKNUM","?REFNUM","?SIC","?PAYEEID","?NAME",
"?PAYEE:NAME","+ADDR1","+ADDR2","+ADDR3","+CITY","+STATE",
"+POSTALCODE","+COUNTRY","+PHONE",
"?BANKACCTTO:bankid","+acctid","+accttype",
"?CCACCTTO:acctid",
"?mEMO","?INV401KSOURCE",
"?CURRENCY:currate","+cursym",
"?ORIGCURRENCY:currate","+cursym",
]
invbanktrans=g.query("select "+vars(stmttrnT)+"{"+
pairs("x",[aCCTTYPE+"ACCTFROM:ACCTID"])+
"?x ofx:BANKTRANLIST ?y ."+
"?y ofx:STMTTRN ?z ."+
pairs("z",stmttrnT)+
"}",
initNs=NS,initBindings={"ACCTID":acctid})
for invbanktran in invbanktrans:
bNode=BNode()
f.add((context,flatLine,bNode))
bprt(f,bNode,invstmtrT,invstmtr)
bprt(f,bNode,stmttrnT,invbanktran)
def flat(fin,fout,context=None,hashing=False):
g = Graph()
g.parse(fin,format='n3',publicID=context)
f = Graph() #output graph
f.bind("ofx",OFX)
f.bind("xsd",XSD)
f.bind("a3",A3)
context=FS[context]
flatseclist(g,f,context)
accounts=g.query("select ?typ "+vars(["ACCTID"])+"{"+
"[ ?typ ?x ] . "+
pairs("x",["ACCTID"])+"}",initNs=NS)
for account in accounts:
acctid=account[1]
typ=account[0]
if typ==OFX["BANKACCTFROM"]:
logging.info("Bank statement")
flatbank(g,f,acctid,context)
elif typ==OFX["CCACCTFROM"]:
logging.info("Credit card statement")
flatbank(g,f,acctid,context,cc=True)
elif typ==OFX["INVACCTFROM"]:
logging.info("Investment statement")
flatinv(g,f,acctid,context)
else:
logging.info("Unrecognized type %s"%account[1])
fout.write(f.serialize(format='n3'))
| [
[
8,
0,
0.0542,
0.0159,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
14,
0,
0.0661,
0.0079,
0,
0.66,
0.0435,
868,
1,
0,
0,
0,
0,
3,
0
],
[
1,
0,
0.0714,
0.0026,
0,
0.66,... | [
"\"\"\"Convert OFX v1.x or QFX files to flat lines.\nThis is done in two stages.\nThe first converts the OFX to N3 file that keeps the structure of the original\nOFX file.\nThe second stage converts the N3 file to a graph and then unstructure it.\n\"\"\"",
"help=\"\"\"Convert OFX v1.x file (MS-Money or Quicken We... |
########################################################################
# 3account - personal finance data-base
# Copyright (C) 2007,8,9 Ehud Ben-Reuven
# udi@benreuven.com
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation version 2.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
########################################################################
"""Convert Citi N.A. (Citibank North America)
Portfolio (investment) Account Activity CSV file to flat--OFX-RDF and
load into Sesame server.
"""
from csv2flt import *
import re
import os
import time
from n3tagvalue import *
header = [
['Account Number','Memo','Price','Quantity','Activity Type','Check Number',
'Total','Trade Date','Cusip']
]
line=['(?P<ACCTID>[\d\-A-Z]+)',
strre('MEMO'),
floatre('UNITPRICE'),
floatre('UNITS'),
strre('TRNTYPE'),
"(99999)?",
floatre('TOTAL'),
'(?P<month>\d{2})/(?P<day>\d{2})/(?P<year>\d{4})',
'#(((?P<SECID>[A-Z\d]{9})000)|\s*)',
]
footer=[ # This footer was added between 070501 and 070601
[],
["<!-- -->"],
]
def flat(csvfile,fout,context=None,hashing=False):
dtasof=time.strftime("%Y%m%d%H%M%S",time.gmtime(os.stat(csvfile).st_mtime))
logging.info("DTASOF=%s"%dtasof)
n3header(fout,"$Id$"[1:-1],context)
h,t,f = readcsvtable(csvfile,header,line,footer,optional_footer=True)
accttype='INVESTMENT'
currency='USD'
table_total=0
accounts=[]
for l in t:
print >>fout,"a3:flatLine [ ",
acctid=l['ACCTID']
memo=l['MEMO']
if l['SECID']:
secid=l['SECID']
else:
secid='CASH='+currency
dttrade = l['year']+l['month']+l['day']
units=float(l['UNITS']) if l['UNITS'] else None
if units==0: units=None
unitprice=float(l['UNITPRICE']) if l['UNITPRICE'] else None
if unitprice==0: unitprice=None
total=float(l['TOTAL']) if l['TOTAL'] else None
if total==0: total=None
# TRNTYPE
relsecid=None
assetclass=None
trntype=l['TRNTYPE']
if secid.startswith('CASH='):
if trntype=='DEPOSIT':
assert unitprice==None
assert units==None
assert total>0
trntype='CREDIT'
elif trntype=='CASH JRNL':
assert unitprice==None
assert units==None
assert total<0
trntype='DEBIT'
elif trntype=='FEE':
assert unitprice==None
assert units==None
assert total<0
trntype='DEBIT'
else:
logging.info('Unknown TRNTYPE %s'%trntype)
else:
if trntype=='BOUGHT':
assert unitprice>0
assert units>0
assert units*unitprice<=-total+0.02 # rounding error
trntype='BUY'
elif trntype=='SOLD':
assert unitprice>0
assert units<0
assert -units*unitprice>=total-0.03 # Rounding error
trntype='SELL'
elif trntype=='INTEREST':
assert unitprice==None or unitprice==1 # interest from deposit
assert units==None
assert total>0
trntype='INT'
elif trntype=='DIVIDEND':
assert unitprice==None
assert units==None
assert total
if total<0:
logging.info('%s','Negative dividend %f'%total)
trntype='DIV'
elif trntype=='CAPITAL GAIN':
assert unitprice==None
assert units==None
assert total
if total<0:
logging.info('%s','Negative dividend %f'%total)
if memo.find('L/T')!=-1:
trntype='CGLONG'
else:
trntype='CGSHORT'
elif trntype=='REINVEST':
if unitprice==None and units==None:
assert total<0
relsecid=secid
#TODO trntype='DEBIT'
#TODO secid='CASH='+currency
else:
assert unitprice>0
assert units>0
assert not total # is 0 because it is always followed with a REINVEST transaction in current account
trntype='BUY'
elif trntype=='REVERSAL':
pass
elif trntype=='CONVERSION':
assert total==None
assert unitprice==None
trntype='TRANSFER'
elif trntype=='WITHDRAWAL':
if units:
assert unitprice==1
assert units<0
assert -units*unitprice>=total-0.01 # Rounding error
trntype='DEBIT'
assetclass='DEPOSIT'
elif trntype=='DEPOSIT':
if units:
assert unitprice==1
assert units>0
assert units*unitprice<=-total+0.02 # rounding error
trntype='CREDIT'
assetclass='DEPOSIT'
else:
raise Exception('Unknown TRNTYPE')
strn=stagvalue('ACCTID',acctid,hashing=hashing)
strn+=n3secid('CUSIP',secid)
strn+=stagvalue('DTTRADE',dttrade)
strn+=stagvalue('tran',trntype)
strn+=stagvalue('units',units)
print >>fout,stagvalue('FITID',hash(strn)),
print >>fout,strn,
print >>fout,stagvalue('DTASOF',dtasof),stagvalue('ACCTTYPE',accttype),
print >>fout,stagvalue('DTSETTLE',dttrade),
print >>fout,stagvalue('UNITPRICE',unitprice),
print >>fout,stagvalue('TOTAL',total),
print >>fout,stagvalue('CURRENCY',currency),
print >>fout,stagvalue('ASSETCLASS',assetclass),
#TODO print >>fout,stagvalue('RELSECID',relsecid),
print >>fout,stagvalue('mEMO',memo,hashing=hashing)
# Every transaction generates a matching transaction in the current account.
# The only exception is a REINVEST transaction that for some reason has a TOTAL=0 in the CSV file but on the other hand
# has a seperatet DEBIT transaction in the same CSV file.
#if total and not secid.startswith('CASH='):
#relsecid=secid
#trntype='CREDIT' if total>0 else 'DEBIT'
#secid='CASH='+currency
#print >>fout,stagvalue('ACCTID',acctid,hashing=hashing),
#print >>fout,stagvalue('ACCTTYPE','INVESTMENT'),
#print >>fout,n3secid('CUSIP',secid),
#print >>fout,stagvalue('DTSETTLE',dttrade),
#print >>fout,stagvalue('DTTRADE',dttrade),
#print >>fout,stagvalue('TRNTYPE',trntype),
#print >>fout,stagvalue('UNITS',total),
#print >>fout,stagvalue('UNITPRICE',1.),
#print >>fout,stagvalue('TOTAL',total),
#print >>fout,stagvalue('CURRENCY',currency),
#print >>fout,stagvalue('RELSECID',relsecid),
print >>fout,"];"
print >>fout,"."
| [
[
8,
0,
0.1025,
0.02,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.115,
0.005,
0,
0.66,
0.1111,
514,
0,
1,
0,
0,
514,
0,
0
],
[
1,
0,
0.12,
0.005,
0,
0.66,
0... | [
"\"\"\"Convert Citi N.A. (Citibank North America)\nPortfolio (investment) Account Activity CSV file to flat--OFX-RDF and\nload into Sesame server.\n\"\"\"",
"from csv2flt import *",
"import re",
"import os",
"import time",
"from n3tagvalue import *",
"header = [\n['Account Number','Memo','Price','Quanti... |
########################################################################
# Copyright (C) 2007,8,9 Ehud Ben-Reuven
# udi@benreuven.com
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation version 2.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
########################################################################
"""ofx2xml OFX-file-name XML-file-name
write OFX v1.02 file in XML format.
You should supply the name of the OFX file to read and the name of the XML file
in which the result will be written to.
OFX v1.02 is written in SGML which is harder to parse than XML
Also OFX v2 are XML based.
Hopefully the same code can be used to parse both the output of this program and OFX v2.
This program verify the HTTP header of an OFX reply remove it and then
transfer the body of the HTTP OFX reply to the execuable osx.exe
osx.exe is assumed to be located in c:\OpenSP
you can download the latest version from http://sourceforge.net/projects/openjade/, http://openjade.sourceforge.net/
(I tried OpenSP-1.5.2-win32.zip)
For a minimal configuration you will need sox.exe and ospXXX.dll
osx.exe needs the definitions (DTD) of OFX supplied from
http://www.ofx.net/DownloadPage/Files/OFX1.0.3.zip
The files are:
ofxact.dtd,ofxbank.dtd,ofxbill.dtd,ofxinv.dtd,ofxmail.dtd,ofxmain.dtd,ofxprof.dtd,ofxsign.dtd
place these files at c:\OpenSP
"""
import sys, os, string
import subprocess
def ofx2xml(ofxfile,outfile):
if not os.path.exists(ofxfile):
print "\nFile %s not found." % (ofxfile)
raise Exception('File not found')
fofx=open(ofxfile,'r')
lofx=fofx.readlines()
fofx.close()
# Skip everything before <OFX>
scanner_output = []
in_ofx = False
for l in lofx:
l = l.replace('&','&')
if in_ofx:
scanner_output.append(l)
elif l.find('<OFX>') != -1:
scanner_output.append(l)
in_ofx = True
if os.path.exists('c:/OpenSP'):
popen_args = 'c:/OpenSP/osx.exe -D c:\\OpenSP -wno-valid ofxmain.dtd -'
else:
popen_args = ['osx', '-wno-valid', 'ofxmain.dtd', '-']
if isinstance(outfile,basestring):
xml_file = open(outfile, 'w')
else:
tmpfile=ofxfile+'.xml'
xml_file = open(tmpfile, 'w')
p = subprocess.Popen(popen_args,
stdin=subprocess.PIPE, stdout=xml_file,
stderr=subprocess.PIPE)
(out, err) = p.communicate(input=''.join(scanner_output))
if p.returncode != 0:
if out:
print "osx stdout:\n" + out
if err:
print "osx stderr:\n" + err
raise Exception("Unexpected return code %d from osx" % p.returncode)
xml_file.close()
if not isinstance(outfile,basestring):
xml_file=open(tmpfile,'r')
for line in xml_file:
outfile.write(line)
xml_file.close()
os.remove(tmpfile)
if __name__ == "__main__":
if len(sys.argv) < 3:
print "ofx2xml <OFX-file> <XML-file>"
sys.exit(1)
ofxfile = sys.argv[1]
xmlfile = sys.argv[2]
ofx2xml(ofxfile,xmlfile)
| [
[
8,
0,
0.29,
0.23,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.42,
0.01,
0,
0.66,
0.25,
509,
0,
3,
0,
0,
509,
0,
0
],
[
1,
0,
0.43,
0.01,
0,
0.66,
0.5,
... | [
"\"\"\"ofx2xml OFX-file-name XML-file-name\nwrite OFX v1.02 file in XML format.\nYou should supply the name of the OFX file to read and the name of the XML file\nin which the result will be written to.\n\nOFX v1.02 is written in SGML which is harder to parse than XML\nAlso OFX v2 are XML based.\nHopefully the same ... |
"""ofx2n3.py -- interpret OFX format as RDF
Converts OFX format (as in downloaded back statements etc
The conversion is only syntactic. The OFX modelling is
pretty weel thought out, so taking it as defining an effecive
RDF ontolofy seems to make sense. Rules can then be used to
define mapping into your favorite ontology.
DESIGN NOTES
The properties have even been left in upper
case, although I wouldn't do that again next time.
The SGML/XML tree is converted into a tree of blank nodes.
This is made easier by the rule that OFX does not allow empty elements
or mixed content.
OFX actually defines a request-response protocol using HTTP and
SGML (v1.*) or XML (v2.*).
I have only had access to downloaded statements which look like HTTP
responses carrying SGML, so that is what this handles.
REFERENCES
This converts data from the common proprietary format whcih seems
to be in use. The spec i found is a later XML-based version, which will
be much simpler. Alas the spec not served directly on the web.
"Open" Financial Exchange
Specification 2.0
April 28, 2000 (c) 2000 Intuit Inc., Microsoft Corp.
We try to stick to:
Python Style Guide
Author: Guido van Rossum
http://www.python.org/doc/essays/styleguide.html
LICENSE OF THIS CODE
Workspace: http://www.w3.org/2000/10/swap/pim/financial/
Copyright 2002-2003 World Wide Web Consortium, (Massachusetts
Institute of Technology, European Research Consortium for
Informatics and Mathematics, Keio University). All Rights
Reserved. This work is distributed under the W3C(R) Software License
http://www.w3.org/Consortium/Legal/2002/copyright-software-20021231
in the hope that it will be useful, but WITHOUT ANY WARRANTY;
without even the implied warranty of MERCHANTABILITY or FITNESS FOR
A PARTICULAR PURPOSE.
This was http://www.w3.org/2000/10.swap/pim/financial/OFX-to-n3.py
"""
help="""Convert OFX file to OFX-RDF (in memory) and load to Sesame server.
"""
__version__ = "$Id: ofx2n3.py 161 2009-10-25 00:18:28Z udi.benreuven $"
thisSource = "http://www.w3.org/2000/10.swap/pim/financial/OFX-to-n3.py"
import os
import sys
import re
import logging
from n3tagvalue import stagvalue,n3header
reHeader = re.compile(r"""\s*
(OFXHEADER:(?P<OFXHEADER>\d+)\s*)
(DATA:(?P<DATA>[A-Z][A-Z0-9]*?)\s*)
(VERSION:(?P<VERSION>\d*)\s*) # do a non-greedy match, not eat the next tag
(SECURITY:(?P<SECURITY>[A-Z][A-Z0-9]*?)\s*)
(ENCODING:(?P<ENCODING>[A-Z][A-Z0-9]*?)\s*)
(CHARSET:(?P<CHARSET>\d+)\s*)
(COMPRESSION:(?P<COMPRESSION>[A-Z][A-Z0-9]*?)\s*)
(OLDFILEUID:(?P<OLDFILEUID>[A-Z][A-Z0-9]*?)\s*)
(NEWFILEUID:(?P<NEWFILEUID>[A-Z][A-Z0-9]*)\s*)
""",re.X) #
reTag = re.compile(r"\s*<(?P<endtag>/)?(?P<tag>[A-Z][A-Z0-9\.]+)>")
reValue = re.compile(r"([^<]*)")
def flat(fin,fout,context=None,hashing=False):
if isinstance(fin,str):
fin=open(fin)
else:
fin.seek(0)
doc = fin.read() # Process the content as a singel buffer
n3header(fout,"$Id$"[1:-1],context)
print >>fout," ofxh:headers ["
doc=doc.strip()
stack = []
header = reHeader.match(doc)
if not header: raise SyntaxError("Can't find header")
pos = header.end()
header = header.groupdict()
for tag,value in header.iteritems():
print >>fout," ofxh:%s \"%s\";" % (tag, value) #@@ do n3 escaping
print >>fout,"];\n"
# Check our assumptions on header
if header["ENCODING"] != "USASCII":
raise SyntaxError('ENCODING:%s should be USASCII'%header["ENCODING"])
if header["OFXHEADER"] != "100":
raise SyntaxError('OFXHEADER:%s should be 100'%header["OFXHEADER"])
if header["VERSION"] != "102":
raise SyntaxError('VERSION:%s should be 102'%header["VERSION"])
valuetag=None
while pos < len(doc):
tag = reTag.match(doc,pos)
if not tag:
raise SyntaxError("No Tag %s..."%doc[pos:pos+20])
pos = tag.end()
endtag = tag.group("endtag")
tag = tag.group("tag")
if endtag:
if tag != valuetag:
tag2 = stack.pop()
if tag != tag2: raise SyntaxError(
"Found </%s> when </%s> expected.\nStack: %s" %
(tag, tag2, stack))
print >>fout,"%s]; # %s" % (" "*len(stack), tag)
valuetag=None
else:
value = reValue.match(doc,pos)
if value:
pos = value.end()
value = value.group(1).strip()
else:
value=""
if not value: # Start tag
valuetag=None
print >>fout,"%s ofx:%s [" %(" "*len(stack), tag)
stack.append(tag)
else: # Data tag
valuetag=tag
print >>fout," "*len(stack),stagvalue(tag,value,hashing=hashing)
if stack: raise SyntaxError("Unclosed tags: %s" % stack)
print >>fout,"."
| [
[
8,
0,
0.1934,
0.3796,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
14,
0,
0.3905,
0.0146,
0,
0.66,
0.0833,
868,
1,
0,
0,
0,
0,
3,
0
],
[
14,
0,
0.4088,
0.0073,
0,
0.66... | [
"\"\"\"ofx2n3.py -- interpret OFX format as RDF\n\nConverts OFX format (as in downloaded back statements etc\n\n The conversion is only syntactic. The OFX modelling is\n pretty weel thought out, so taking it as defining an effecive\n RDF ontolofy seems to make sense. Rules can then be used to\n define mapping ... |
########################################################################
# Copyright (C) 2007,8,9 Ehud Ben-Reuven
# udi@benreuven.com
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation version 2.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
########################################################################
r"""Flat files
All incoming statements are flat lines:
Statements have a relatively shallow and rigid structrue, in order to simplfy
the processing and make all formats of bank statements look the same, the
statements are transformed into lines that are completely flat:
each line contains all the information it needs without any reference to other
lines.
A line is made from pairs of tag and value.
All the pairs in the same line are assumed to be bound togther and describe
different aspects of the same object(s)
(e.g., bank, account, holding, transaction or asset).
The syntax of each line is:
<flat-line> = [SP <tag><op><value> ]*
<tag> = <a3Tag> | <ofxTag>
<ofxTag> = [A-Z]+ # tag from ofx: name space
<a3Tag> = [a-z][a-zA-Z0-9]* # tag from a3: name space
<op> = <relational-op>
<relational-op> = '='
<value> = <text-without-blanks> | ...
(QUOTE <any-text> QUOTE) | ... # Surounding quotes on values are removed
'`' <python-expression> '`' | # Evaluate the text surronded by a back-strich
# as a python expression and cast to string
* You can insert a quote by \"
* " is translated back to a quote char.
* You can put a '#' comment at an end of a flat line
* empty lines are ignored
* The parse method converts lines to a dictionary in which the tag is the key
and its value is a list of all the values the same tag received in the line.
As a result the order in which pairs appeared in the line is not preserved.
None flat aspects of a file of flat lines:
* All lines are assumed to come from the file even if the file name itself does
not appear in each line this is trivial but it has some
implications. For example: two lines in the same file showing a transaction
from the same holding on the same time will be assumed to be two different
transactions if they come from the same file, however the same two lines
coming from two different files are more likely to be two different
discriptions of the same transaction. Note that the file basename itself will
be used as a reference, without its path or extension or any other
signature so different unique names should be used for different flat files.
"""
import os
import re
import sys
from rdflib.graph import *
from rdflib.namespace import *
from n3tagvalue import *
OFX = Namespace("http://www.w3.org/2000/10/swap/pim/ofx#")
A3 = Namespace("http://code.google.com/p/3account/wiki/Schema#")
XSD = Namespace("http://www.w3.org/2001/XMLSchema#")
FS = Namespace("file://")
flatLine = A3["flatLine"]
reobj=re.compile(r"""(?x) # allow for commenting inside the RE
\s* # Ignore white space
(
(?P<comment>\#)| # Ignore the rest of the line.
( # or look for a regular tag-value pair
(?P<tag>\w+) # tag
= # look for operator
( # Value
(?P<quote>"|`)? # look for quote at start of value
(?P<value>.*?) # finally the value. Dont be greedy so not to eat the
# next token. Note that an empty value is also OK
(?(quote)
((?<!\\)(?P=quote))| # If there was a quote at the start then look
# for it at the end. Skip backslash quote
(?=(\s|$)) # If there wasn't a quote at start: The value is not
# greedy so force an unquoted value to be followed with
# white-space or or to continue to end of string
)
) #end of value
) # end of tag-value pair
)""")
def parse(line):
"""Parse a line from a flat file into its tag-value pairs.
The return-value is a dictionary where the keys are the tags and the
values are the list of the values that appeared in the line.
"""
if not line: return {}
l=line
ldict={}
while True:
p=reobj.match(l)
if not p: break
# handle the return value in line for a recursive usage
l=l[p.end():]
if p.group('comment'):
return ldict
tag=p.group('tag')
values=ldict.setdefault(tag,[]) # Make sure the tag appears in ldict even if there is no value.
quote=p.group('quote')
value=p.group('value')
value=value.replace('"','"')
if not value: # empty values don't need to be added to list of values for tag in ldict
pass
elif quote=='`':
values.append(eval(value))
else:
values.append(value)
return ldict
def flat(fin,fout,context=None,hashing=False):
if isinstance(fin,str):
fin=open(fin, "r")
else:
fin.seek(0)
n3header(fout,"$Id$"[1:-1],context)
for line in fin:
line = parse(line)
if not line: continue
print >>fout,"a3:flatLine [ ",
for tag,value in line.iteritems():
print >>fout,stagvalue(tag,value,hashing=hashing),
print >>fout,"];"
print >>fout,"."
fin.close()
| [
[
8,
0,
0.2721,
0.3061,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.4354,
0.0068,
0,
0.66,
0.0714,
688,
0,
1,
0,
0,
688,
0,
0
],
[
1,
0,
0.4422,
0.0068,
0,
0.66... | [
"r\"\"\"Flat files\nAll incoming statements are flat lines:\nStatements have a relatively shallow and rigid structrue, in order to simplfy\nthe processing and make all formats of bank statements look the same, the\nstatements are transformed into lines that are completely flat:\neach line contains all the informati... |
########################################################################
# xls2csv2 - convert excel to csv and handle encoding
# Copyright (C) 2007,8,9 Ehud Ben-Reuven
# udi@benreuven.com
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation version 2.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
########################################################################
"""xls2csv2 [-e encoding] Excel-file-name CSV-file-name
write the Excel file in CSV format
handle unicode strings using specified encoding.
Example: xls2csv2 -e hebrew foo.xls foo.csv
"""
import sys, os, string
from optparse import OptionParser
try:
import win32com.client
import pythoncom
except ImportError:
pass
import csv
def xls2csv2(xlfile,csvfile,encoding):
xlfile=os.path.abspath(xlfile)
try:
xl = win32com.client.Dispatch ("Excel.Application")
# xl.Visible = 1
wb = xl.Workbooks.Open (xlfile)
xls = wb.ActiveSheet
nr = xls.UsedRange.Rows.Count
nc = xls.UsedRange.Columns.Count
if isinstance(csvfile,str):
fp=open(csvfile, "wb")
else:
fp=csvfile
writer = csv.writer(fp)
for r in range(1,nr+1):
row=[]
for c in range(1,nc+1):
cell = xls.Cells(r,c).Value
if (not encoding is None) and isinstance(cell,unicode):
cell=cell.encode(encoding,'replace')
row.append(cell)
writer.writerow(row)
wb.Close(SaveChanges=0)
xl.Quit()
except pythoncom.com_error, (hr, msg, exc, arg):
print "Failed to convert excel file to CSV"
print "The Excel call failed with code %d: %s" % (hr, msg)
if exc is None:
print "There is no extended error information"
else:
wcode, source, text, helpFile, helpId, scode = exc
print "The source of the error is", source
print "The error message is", text
print "More info can be found in %s (id=%d)" % (helpFile, helpId)
raise
except:
print "failed to write to csv file", sys.exc_info()[0]
raise
def main():
parser = OptionParser(__doc__)
ehelp="Cell containing unicode will be encoded with this encoder, e.g.,\
-e hebrew"
parser.add_option("-e", "--encoding", dest="encoding",help=ehelp)
(options, args) = parser.parse_args()
if len(args) != 2:
parser.error("Missing file name")
xlfile = args[0]
csvfile = args[1]
if not os.path.exists(xlfile):
print "\nFile %s not found." % (xlfile)
sys.exit(1)
try:
xls2csv2(xlfile,csvfile,options.encoding)
except:
sys.exit(1)
if __name__ == "__main__":
main()
| [
[
8,
0,
0.2287,
0.0638,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.266,
0.0106,
0,
0.66,
0.1429,
509,
0,
3,
0,
0,
509,
0,
0
],
[
1,
0,
0.2766,
0.0106,
0,
0.66,... | [
"\"\"\"xls2csv2 [-e encoding] Excel-file-name CSV-file-name\n\nwrite the Excel file in CSV format\nhandle unicode strings using specified encoding.\nExample: xls2csv2 -e hebrew foo.xls foo.csv\n\"\"\"",
"import sys, os, string",
"from optparse import OptionParser",
"try:\n import win32com.client\n impor... |
"""Init.py
I'm not actually sure what this does.
"""
__version__ = "$Revision: 1.1 $"
__all__ = [ "string"
]
| [
[
8,
0,
0.3182,
0.5455,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
14,
0,
0.7273,
0.0909,
0,
0.66,
0.5,
162,
1,
0,
0,
0,
0,
3,
0
],
[
14,
0,
0.9545,
0.1818,
0,
0.66,
... | [
"\"\"\"Init.py\n\nI'm not actually sure what this does.\n\n\n\"\"\"",
"__version__ = \"$Revision: 1.1 $\"",
"__all__ = [ \"string\"\n ]"
] |
import hashlib
import logging
import re
import ConfigParser
# List of tags which we want their value to be hashed if "hashing" is True
hash_list = ["ACCTID", "INTU_USERID","owner"];
# List of tags that may contain a sub-string that needs to be hashed
# usually transfers write the account to which the transfer is made in these
# fields.
hash_replace_list = ["NAME", "MEMO", "mEMO","acctMemo"]
typeDecimal = ["MKTVAL", "TRNAMT", "UNITS", "UNITPRICE",
"TOTAL", "BALAMT", "PARVALUE", "COUPONRT", "MARKDOWN", "MARKUP",
"COMMISSION", "FEES","AVAILCASH","MARGINBALANCE",
"SHORTBALANCE","BUYPOWER","INTEREST","FEES","TAXES"
]
typeDT = ["DTASOF", "DTPRICEASOF", "DTTRADE", "DTSETTLE", "DTPOSTED",
"DTSTART", "DTEND", "DTMAT", "DTSERVER", "DTPROFUP",
]
reDT = re.compile(r"""(?P<Y>(\d\d|\d{4}))(?P<M>\d{2})(?P<D>\d{2}) # Year, Month, Day
((?P<h>\d{2})(?P<m>\d{2})((?P<s>\d{2})(\.\d+)?)?)? # Hours, Minutes,seconds,mili
((\[(?P<tzsign>(\+|\-))?(?P<tz>\d{1,2})\:[A-Z]{3,4}\])|[A-Z]{3,4})? # Time-Zone
$""",re.X)
def expandyear(year):
"""Y2K"""
if len(year)==2:
if year>'50':
year='19'+year
else:
year='20'+year
return year
def svalue(tag,value,language=None,hashing=False):
    """Serialize one OFX value for embedding in an N3 literal.

    Returns a ``(value, value_type)`` pair where ``value_type`` is an
    ``"^^xsd:..."`` suffix or the empty string.  Depending on the tag
    the value is quote-escaped, converted to an ISO date-time, hashed,
    substring-replaced, or re-encoded to UTF-8.

    ``hashing`` may be False, True, or a ConfigParser instance; a
    RawConfigParser additionally persists the value->hash mapping in
    its 'hash' section so repeated runs hash consistently.
    """
    # Escape double quotes so the value can sit inside an N3 string.
    if isinstance(value,basestring):
        value=value.replace('"','\\"')
    else:
        value=str(value)
    value_type=""
    if tag.upper() in typeDecimal:
        value_type = "^^xsd:decimal"
    elif tag.upper()[0:2]=="DT":
        # Any DT* tag is an OFX date-time; normalize to ISO 8601.
        value_type = "^^xsd:dateTime"
        m= reDT.match(value)
        if not m: raise SyntaxError("Bad date-time %s" % (value))
        if m.group("h"):
            value = "%s-%s-%sT%s:%s:%s"%(
                expandyear(m.group("Y")),m.group("M"),m.group("D"),
                m.group("h"),m.group("m"),m.group("s"))
        else:
            value = "%s-%s-%sT00:00:00"%(
                expandyear(m.group("Y")),m.group("M"),m.group("D"))
        if m.group("tz"):
            # Normalize the OFX "[+h:TZN]" suffix to a "+hh:00" offset.
            if m.group("tzsign"):
                value += m.group("tzsign")
            else:
                value += '+'
            if len(m.group("tz")) ==1:
                value += '0'
            value += m.group("tz")
            value += ":00"
        else:
            # No explicit zone: append the configured default offset.
            import cherrypy
            value+=cherrypy.config.get('tz','')
    elif hashing and tag.upper() in hash_list:
        # Replace identifying values with a short SHA1 prefix.
        new_value = hashlib.sha1(value).hexdigest()[:4]
        if isinstance(hashing,ConfigParser.RawConfigParser):
            # Reuse a previously stored hash so the mapping is stable
            # across runs; otherwise record the new one.
            if not hashing.has_section('hash'):
                hashing.add_section('hash')
            if hashing.has_option('hash',value):
                new_value = hashing.get('hash',value)
            else:
                hashing.set('hash',value,new_value)
        logging.info("Hashing %s to %s"%(value,new_value))
        value=new_value
    elif (tag.upper() in hash_replace_list and
        isinstance(hashing,ConfigParser.RawConfigParser) and
        hashing.has_section('hash')
        ):
        # Free-text fields: substitute any previously hashed substrings.
        for old,new in hashing.items('hash'):
            value = value.replace(old,new)
    elif language:
        value = value.decode(language).encode('utf-8')
    return (value,value_type)
def stagvalue(tag, values, language=None, hashing=False):
    """Serialize a tag with one or more values as N3 predicate text.

    Empty/false values are skipped (floats are always kept); each value
    is escaped and typed via svalue().  Upper-case tags live in the
    ``ofx`` namespace, all others in ``a3``.
    """
    if not isinstance(values, list):
        values = [values]
    tag = tag.replace(".", "_")
    namespace = 'ofx' if tag.isupper() else 'a3'
    parts = []
    for item in values:
        if not (isinstance(item, float) or item):
            continue
        text, text_type = svalue(tag, item, language, hashing)
        parts.append('%s:%s "%s"%s; ' % (namespace, tag, text, text_type))
    return "".join(parts)
def sdict(d, language=None, hashing=False):
    """Serialize a dict of tag -> value(s) into N3 predicate text.

    Returns None for an empty/None input (preserving the original bare
    ``return`` behaviour); otherwise the space-joined serialization of
    every entry.
    """
    if not d: return
    assert isinstance(d, dict)
    res = []
    for k, v in d.iteritems():
        # BUG FIX: the original passed the undefined name ``t`` here,
        # raising NameError on any non-empty dict; the loop key ``k``
        # is what was intended.
        res.append(stagvalue(k, v, language, hashing))
    return " ".join(res)
def n3secid(uniqueidtype, uniqueid):
    """Format a security id as N3 ofx:UNIQUEID / ofx:UNIQUEIDTYPE predicates."""
    return 'ofx:UNIQUEID "%s" ; ofx:UNIQUEIDTYPE "%s" ;' % (uniqueid, uniqueidtype)
def n3header(fout,version,context=None):
    """Write the N3 file preamble to *fout*: a generator comment, the
    namespace @prefix declarations, and the opening subject — either a
    file:// URI for *context* or the empty relative URI <>."""
    print >>fout,"""# Generated by %s""" % version
    print >>fout,"""
@prefix ofx: <http://www.w3.org/2000/10/swap/pim/ofx#>.
@prefix ofxh: <http://www.w3.org/2000/10/swap/pim/ofx-headers#>.
@prefix xsd: <http://www.w3.org/2001/XMLSchema#>.
@prefix a3: <http://code.google.com/p/3account/wiki/Schema#>.
"""
    if context:
        # The statements that follow describe the source file itself.
        print >>fout,"<file://%s>"%context
    else:
        print >>fout,"<>"
| [
[
1,
0,
0.0081,
0.0081,
0,
0.66,
0,
154,
0,
1,
0,
0,
154,
0,
0
],
[
1,
0,
0.0163,
0.0081,
0,
0.66,
0.0714,
715,
0,
1,
0,
0,
715,
0,
0
],
[
1,
0,
0.0244,
0.0081,
0,
... | [
"import hashlib",
"import logging",
"import re",
"import ConfigParser",
"hash_list = [\"ACCTID\", \"INTU_USERID\",\"owner\"];",
"hash_replace_list = [\"NAME\", \"MEMO\", \"mEMO\",\"acctMemo\"]",
"typeDecimal = [\"MKTVAL\", \"TRNAMT\", \"UNITS\", \"UNITPRICE\",\n \"TOTAL\", \"BALAMT\", \"PA... |
########################################################################
# Copyright (C) 2009 Ehud Ben-Reuven
# udi@benreuven.com
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation version 2.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
########################################################################
"""Convert a CSV file into a flat file using a set of tables of
regular expressions"""
import re
import csv
import logging
############ Set of regular expression macros used to build templates ##########
def eurdtzre(name):
    """Regular expression for a European date + time + time-zone cell.

    Example matches: '15.03.2007 / 21:46 CET', '11.04.2007 / 11:49 CEST'
    """
    template = (r'(?P<{n}_day>\d\d)\.(?P<{n}_month>\d\d)\.(?P<{n}_year>\d{{4}}) '
                r'/ (?P<{n}_hours>\d\d):(?P<{n}_min>\d\d) (?P<{n}_TZ>[A-Z]{{3,4}})')
    return template.format(n=name)
def strre(name):
    """Regular expression capturing an arbitrary string into group *name*."""
    return '(?P<{0}>.*)'.format(name)
def currencyre(name):
    """Regular expression for a three-letter currency code, e.g. 'USD'."""
    return '(?P<{0}>[A-Z]{{3}})'.format(name)
def commanumre(name):
    """Regular expression for a comma-grouped number with cents.

    Example matches: '9,894.97', '0', '-120.00'
    """
    return r'(?P<{0}>((\-?\d{{1,3}}(,\d{{3}})*\.\d\d)|0))'.format(name)
def commanumreint(name):
    """Regular expression for a comma-grouped number with optional cents.

    Example matches: '9,894.97', '9,894', '0', '-120'
    """
    return r'(?P<{0}>((\-?\d{{1,3}}(,\d{{3}})*(\.\d\d)?)|0))'.format(name)
def commaintre(name):
    """Regular expression for a comma-grouped integer.

    Example matches: '9,894', '0', '-120'
    """
    return r'(?P<{0}>((\-?\d{{1,3}}(,\d{{3}})*)|0))'.format(name)
def pvaluere(name, sig=2):
    """Regular expression for a positive number with *sig* to 2 decimals.

    Example matches: '0.00', '100.12', '9894.97', '0'
    """
    return r'(?P<{0}>\d+\.\d{{{1},2}}|0)'.format(name, sig)
def pvaluereint(name, sig=2):
    """Regular expression for a positive number with optional decimals.

    Example matches: '0.00', '100.12', '9894', '0'
    """
    return r'(?P<{0}>\d+(\.\d{{{1},2}})?|0)'.format(name, sig)
def valuere(name, sig=2):
    """Regular expression for a signed number with *sig* to 2 decimals.

    Example matches: '0.00', '-100.12', '9894.97', '0'
    """
    return r'(?P<{0}>\-?\d+\.\d{{{1},2}}|0)'.format(name, sig)
def valuereint(name, sig=2):
    """Regular expression for a signed number with optional decimals.

    Example matches: '0.00', '-100', '9894.97', '0'
    """
    return r'(?P<{0}>\-?\d+(\.\d{{{1},2}})?|0)'.format(name, sig)
def valuere_optionalcomma(name, sig=2):
    """Regular expression for a signed decimal, commas optional.

    Example matches: '0.00', '100.12', '9894.97', '9,894.97'
    """
    return r'(?P<{0}>\-?((\d{{1,3}}(,\d{{3}})*)|(\d+))\.\d{{{1},2}}|0)'.format(name, sig)
def pintre(name):
    """Regular expression for a non-negative integer."""
    return r'(?P<{0}>\d+)'.format(name)
def floatre(name):
    """Regular expression for a signed float (fraction optional)."""
    return r'(?P<{0}>\-?\d+(\.\d*)?)'.format(name)
def eurdre24(name, sep='.'):
    """Regular expression for a European date with a 2- or 4-digit year.

    Example match: '20.02.2007' (or '20.02.07')
    """
    return (r'(?P<{n}_day>\d\d)\{s}(?P<{n}_month>\d\d)'
            r'\{s}(?P<{n}_year>\d\d(\d\d)?)').format(n=name, s=sep)
def eurdre(name, sep='.', year_len=4):
    """Regular expression for a European date with a fixed-width year.

    Example match: '20.02.2007'
    """
    return (r'(?P<{n}_day>\d\d)\{s}(?P<{n}_month>\d\d)'
            r'\{s}(?P<{n}_year>\d{{{y}}})').format(n=name, s=sep, y=year_len)
def usdre(name, sep='/', year_len=2):
    """Regular expression for a US date (month first).

    Example match: '12/29/06'
    """
    return (r'(?P<{n}_month>\d\d)\{s}(?P<{n}_day>\d\d)'
            r'\{s}(?P<{n}_year>\d{{{y}}})').format(n=name, s=sep, y=year_len)
def empty(name):
    """Regular expression capturing nothing into group *name* (matches '')."""
    return r'(?P<{0}>)'.format(name)
##################### Parse a CSV file with fixed structure ####################
def matchline(row,regexps):
    """Match a list of strings (a parsed line from a CSV file) with a list of
    regular expressions (template line) and return a dictionary of all the named
    groups found in all matches or None if there is no match"""
    maxncell=min([len(row),len(regexps)])
    # Extra row cells beyond the template must all be empty.
    for i in range(maxncell,len(row)):
        if row[i]:
            return
    # Extra template regexes beyond the row must accept the empty string.
    for i in range(maxncell,len(regexps)):
        if not re.match('\\A'+regexps[i]+'\\Z',''):
            return
    objs={}
    for ncell,cell in enumerate(row):
        if ncell>=maxncell:
            break
        r=regexps[ncell]
        # Anchor the cell regex so it must consume the whole cell.
        obj=re.match('\\A'+r+'\\Z',cell)
        if not obj:
            logging.info("%s does not match re %s"%(cell,r))
            return
        d=obj.groupdict()
        # Merge named groups across cells: a None capture never
        # overwrites a real value, and two different non-None values
        # for the same group name are a hard conflict.
        for k in d:
            if k not in objs or objs[k]==None:
                objs[k]=d[k]
            elif d[k]==None: continue
            elif d[k]!=objs[k]:
                raise Exception("Conflicting match")
    return objs
def fixedtable(reader, regexps):
    """Match the next len(regexps) CSV rows against a fixed template.

    Each element of *regexps* is the list of cell regexes for one line.
    Returns a list of group dictionaries (one per line), or None on the
    first mismatching line; raises if the reader runs out of rows
    before the template is exhausted.
    """
    parsed = []
    if not regexps:
        return parsed
    last = len(regexps) - 1
    for lineno, row in enumerate(reader):
        line_res = regexps[lineno]
        groups = matchline(row, line_res)
        if groups == None:
            logging.info('Mismatch at line %d:' % lineno)
            logging.info('%s', 'ROW: %s' % row)
            logging.info('%s', 'REG: %s' % line_res)
            return None
        parsed.append(groups)
        if lineno == last:
            return parsed
    raise Exception('File too short')
def multilinetable(reader,line,footer=None,optional_footer=False):
    """Read a CSV table using line which is a list of regular expresions
    that should each match the appropriate cell in each line.
    Repeat the process until EOF or until there is a match with a footer
    which is an RE of a fixed table.

    Returns (table, footer_table): the named-group dictionaries for the
    body rows and for the footer rows.  Raises when a row matches
    neither the line template nor the expected footer line, or when the
    footer is incomplete at EOF (unless optional_footer and no footer
    line was seen at all).
    """
    table=[]
    footer_table=[]
    footer_line=0
    first_footer=True
    # BUG FIX: last_empty_row was previously unbound when the reader
    # was empty, making the completeness check below raise NameError.
    last_empty_row=None
    for row in reader:
        if footer:
            foot_match=False
            while True:
                if footer_line>=len(footer):
                    raise Exception('File too long, line %d'%reader.line_num)
                # If a footer line is empty then check that all cells in row
                # are empty and continue to next row.
                # If any of the cells in row is not empty then skip the empty footer
                # line
                last_empty_row=None
                if footer[footer_line]==None:
                    if not any(row):
                        last_empty_row=row
                        foot_match=True
                        break
                    footer_line+=1
                if footer_line>=len(footer):
                    footer_objs=None
                else:
                    footer_objs=matchline(row,footer[footer_line])
                if footer_objs!=None:
                    footer_table.append(footer_objs)
                    footer_line+=1
                    foot_match=True
                    break
                else:
                    if first_footer:
                        footer_line=0
                    # if we failed to match then if we are on the first line
                    # of the foot (0) the footer didnt started yet.
                    if not footer_line:
                        break
                    # if we are inside the footer then there is one chance
                    # that we have a match:
                    # the current footer regex should have been applied to the
                    # previous line if the previouse line was taken to be a
                    # a match with None
                    if (last_empty_row and
                        matchline(last_empty_row,footer[footer_line])):
                        footer_line+=1
                        continue # try matching the next footer line with current row
                    raise Exception('Footer line %d does not match line %d'%(
                        footer_line,reader.line_num))
            if foot_match:
                first_footer=False
                continue
            assert(footer_line==0) # Entire footer must match once the first line was matched
        objs=matchline(row,line)
        if objs==None:
            logging.info('%s','ROW: %s'%row)
            logging.info('%s','LIN: %s'%line)
            raise Exception("Failed to match")
        table.append(objs)
    if footer_line==0 and optional_footer:
        pass
    else:
        if footer and footer_line<len(footer):
            # BUG FIX: the original compared the *footer list* itself to
            # an int (``footer<len(footer)-1``), which in Python 2 is
            # always False, silently weakening the completeness check;
            # the footer_line counter is what was intended.
            if footer_line<len(footer)-1 or not last_empty_row:
                raise Exception('Only %d lines of footer where found'%footer_line)
    return (table,footer_table)
def readcsvtable_multiformat(csvfile, htf_list):
    """Try each (header, line, footer) template in *htf_list* until one
    parses *csvfile*.

    Returns (header, table, footer, version) where *version* is the
    index of the matching template.  Raises Exception when no template
    matches — including an empty template list, which previously
    crashed with NameError on the unbound loop variables.
    """
    h = t = f = version = None
    for version, htf in enumerate(htf_list):
        logging.info("Attempting version %d" % version)
        h, t, f = readcsvtable(csvfile, htf[0], htf[1], htf[2])
        if h != None:
            break
    if h == None:
        raise Exception('Unknwon file format')
    return h, t, f, version
def readcsvtable(fname, header=None, line=None, footer=None, optional_footer=False):
    """Its assumed that the CSV file has a fixed structure:
    It starts with a fixed header lines
    followed by zero or more lines each with the same structure
    and then followed by EOF or a footer line.

    *fname* may be a path or an already-open file object (it is rewound
    either way).  Returns (header_dicts, table_dicts, footer_dicts),
    or (None, None, None) when the header does not match.
    """
    # Fresh lists instead of shared mutable [] defaults.
    if header is None: header = []
    if line is None: line = []
    if footer is None: footer = []
    opened_here = isinstance(fname, str)
    fp = open(fname, "rb") if opened_here else fname
    try:
        fp.seek(0)
        reader = csv.reader(fp)
        logging.info("Reading header")
        h = fixedtable(reader, header)
        if h == None:
            return (None, None, None)
        logging.info("Reading table & footer")
        t, f = multilinetable(reader, line, footer, optional_footer)
        return (h, t, f)
    finally:
        # RESOURCE FIX: close the file only if we opened it; the
        # original leaked the handle on every call with a path.
        if opened_here:
            fp.close()
| [
[
8,
0,
0.0661,
0.0071,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.0714,
0.0036,
0,
0.66,
0.04,
540,
0,
1,
0,
0,
540,
0,
0
],
[
1,
0,
0.075,
0.0036,
0,
0.66,
... | [
"\"\"\"Convert a CSV file into a flat file using a set of tables of\nregular expressions\"\"\"",
"import re",
"import csv",
"import logging",
"def eurdtzre(name):\n \"\"\"regular expression for an European Date + Time + TZ\n\tFor example: '15.03.2007 / 21:46 CET'\n\t'11.04.2007 / 11:49 CEST'\n\t\"\"\"\n ... |
from rdflib import Namespace, URIRef, Literal, BNode
from rdflib.graph import Graph
from urllib import quote_plus
from httplib import HTTPConnection
from cStringIO import StringIO
import xml.dom
from simplejson import loads
# OWL/RDF/RDFS vocabulary terms used to interrogate the ontology graph.
owlNS = Namespace("http://www.w3.org/2002/07/owl#")
owlClass = owlNS["Class"]
owlObjectProperty = owlNS["ObjectProperty"]
owlDatatypeProperty = owlNS["DatatypeProperty"]
rdfNS = Namespace("http://www.w3.org/1999/02/22-rdf-syntax-ns#")
rdfProperty = rdfNS["Property"]
rdfType = rdfNS["type"]
rdfsNS = Namespace("http://www.w3.org/2000/01/rdf-schema#")
rdfsSubClassOf = rdfsNS["subClassOf"]
rdfsDomain = rdfsNS["domain"]
rdfsRange = rdfsNS["range"]
class SesameTransaction:
    """Accumulates add/remove RDF statements as a Sesame transaction
    document and serializes it to XML."""

    def __init__(self):
        impl = xml.dom.getDOMImplementation()
        self.trans = impl.createDocument(None, "transaction", None)

    def add(self, statement):
        """Queue a triple for addition."""
        self.__addAction("add", statement)

    def remove(self, statement):
        """Queue a triple for removal."""
        self.__addAction("remove", statement)

    def toXML(self):
        """Serialize the queued transaction as an XML string."""
        return self.trans.toxml()

    def __addAction(self, action, statement):
        doc = self.trans
        node = doc.createElement(action)
        # Branch order matters: Literal/URIRef are string subclasses,
        # so the most specific rdflib types are tested first.
        for term in statement:
            if isinstance(term, Literal):
                child = doc.createElement("literal")
                if term.datatype is not None:
                    child.setAttribute("datatype", str(term.datatype))
                if term.language is not None:
                    child.setAttribute("xml:lang", str(term.language))
                child.appendChild(doc.createTextNode(str(term)))
            elif isinstance(term, URIRef):
                child = doc.createElement("uri")
                child.appendChild(doc.createTextNode(str(term)))
            elif isinstance(term, BNode):
                child = doc.createElement("bnode")
                child.appendChild(doc.createTextNode(str(term)))
            else:
                raise Exception("Unknown element: " + term)
            node.appendChild(child)
        doc.childNodes[0].appendChild(node)
class SesameConnection:
    """HTTP client for an OpenRDF Sesame repository (network I/O only).

    Query results are parsed either into an rdflib Graph (query) or a
    list of JSON result bindings (querylist); updates are POSTed as a
    SesameTransaction document.
    """
    def __init__(self, host, repository=None):
        self.host = host
        self.repository = repository
        self.sparql_prefix=""
    def addnamespace(self, id, ns):
        # Accumulate a PREFIX declaration used by every later query.
        self.sparql_prefix+='PREFIX %s:<%s>\n' % (id,ns)
    def repositories(self):
        # NOTE(review): __getsparql__ is not defined on this class (it
        # exists on pysesame's connection class) — calling this would
        # raise AttributeError; confirm intended.
        return self.__getsparql__('repositories')
    def use_repository(self, r):
        # Select the repository used by subsequent query/update calls.
        self.repository = r
    def __request__(self, method, path, data, headers):
        """Perform one HTTP request against the Sesame server and
        return the raw response body; raises on any status other than
        200 or 204."""
        conn = HTTPConnection(self.host)
        conn.request(method, path, data, headers)
        response = conn.getresponse()
        if response.status != 200 and response.status != 204:
            raise Exception("Sessame connection error " + str(response.status) + " " + response.reason)
        response = response.read()
        conn.close()
        return response
    def query(self, query, graph):
        """Run a CONSTRUCT-style query and parse the RDF/XML response
        into *graph*; returns the same graph."""
        path = '/openrdf-sesame/repositories/' + self.repository + '?query=' + quote_plus(self.sparql_prefix + query)
        data = self.__request__("GET", path, None, {"accept":"application/rdf+xml"})
        graph.parse(StringIO(data))
        return graph
    def querylist(self, query):
        """Run a SELECT-style query and return the JSON result bindings,
        or a single {'error': raw_payload} binding if decoding fails."""
        path = '/openrdf-sesame/repositories/' + self.repository + '?query=' + quote_plus(self.sparql_prefix + query)
        data = self.__request__("GET", path, None, {"accept":'application/sparql-results+json'})
        try:
            result=loads(data)['results']['bindings']
            return result
        except:
            return [{'error':data}];
    def update(self, add=None, remove=None):
        """POST one transaction that removes then adds the given
        iterables of rdflib triples."""
        path = '/openrdf-sesame/repositories/' + self.repository + "/statements"
        trans = SesameTransaction()
        if remove is not None:
            for statement in remove: trans.remove(statement)
        if add is not None:
            for statement in add: trans.add(statement)
        data = self.__request__("POST", path, trans.toXML(), {"content-type":"application/x-rdftransaction"})
class OWLOntology:
    """
    This class loads the mappings from simple property names
    to OWL property URIs.

    For every owl:Class the propertyMaps dict maps a simplified
    property name to its URI; reverse properties (whose rdfs:range is
    the class) are registered under an "r_" prefix.
    """
    def __init__(self, sesameConnection):
        # query for all OWL classes and properties:
        self._ontGraph = Graph()
        sesameConnection.query(
            """construct {
            ?c rdf:type owl:Class .
            ?c rdfs:subClassOf ?sc .
            ?p rdfs:domain ?c .
            ?p rdfs:range ?d .
            ?p rdf:type ?pt .
            ?p rdfs:subPropertyOf ?sp .
            } where
            {
            ?c rdf:type owl:Class .
            OPTIONAL {
            ?c rdfs:subClassOf ?sc .
            }
            OPTIONAL {
            ?p rdfs:domain ?c .
            ?p rdfs:range ?d .
            ?p rdf:type ?pt .
            }
            OPTIONAL {
            ?p rdfs:subPropertyOf ?sp .
            }
            }""", self._ontGraph)
        # map type properties to simplified names:
        self.propertyMaps = {}
        for ontClass in self._ontGraph.subjects(rdfType, owlClass):
            propertyMap = self.propertyMaps[ontClass] = {}
            for property in self._ontGraph.subjects(rdfsDomain, ontClass):
                propertyName = self.getSimplifiedName(property)
                propertyMap[propertyName] = property
            # reverse direction: properties whose range is this class
            for property in self._ontGraph.subjects(rdfsRange, ontClass):
                propertyName = "r_" + self.getSimplifiedName(property)
                propertyMap[propertyName] = property
        # recursivley copy property mappings across the class hierarchy:
        # NOTE(review): a superclass that is not itself declared an
        # owl:Class would raise KeyError here — confirm the ontology
        # always declares superclasses.
        def copySuperclassProperties(ontClass, propertyMap):
            for superclass in self._ontGraph.objects(ontClass, rdfsSubClassOf):
                copySuperclassProperties(superclass, propertyMap)
            propertyMap.update(self.propertyMaps[ontClass])
        for ontClass in self._ontGraph.subjects(rdfType, owlClass):
            copySuperclassProperties(ontClass, self.propertyMaps[ontClass])
    def getSimplifiedName(self, uri):
        """Return the fragment after '#', or the last path segment, of *uri*."""
        if "#" in uri: return uri[uri.rfind("#") + 1:]
        return uri[uri.rfind("/") + 1:]
class RDFObjectGraphFactory:
    """
    A factory for RDFObjects: registers the standard namespaces on the
    connection, loads the ontology once, and hands out graphs.
    """

    _NAMESPACES = (
        ("xsd", "http://www.w3.org/2001/XMLSchema#"),
        ("rdf", "http://www.w3.org/1999/02/22-rdf-syntax-ns#"),
        ("rdfs", "http://www.w3.org/2000/01/rdf-schema#"),
        ("owl", "http://www.w3.org/2002/07/owl#"),
    )

    def __init__(self, connection):
        self.connection = connection
        for prefix, uri in self._NAMESPACES:
            self.connection.addnamespace(prefix, uri)
        self.ontology = OWLOntology(connection)

    def createGraph(self):
        """Return a fresh RDFObjectGraph bound to this factory's
        connection and ontology."""
        return RDFObjectGraph(self.connection, self.ontology)
class RDFObjectGraph:
    """
    The RDFObjectGraph caches object values for populating RDFObject values.

    Remote triples are cached in _graph; local, uncommitted edits are
    overlaid via the _added and _removed graphs until commit().
    """
    def __init__(self, connection, ontology):
        self._connection = connection
        self._ontology = ontology
        # uri -> RDFObject cache (also marks which uris were fetched)
        self._rdfObjects = {}
        # triples fetched from the store
        self._graph = Graph()
        # local pending additions / removals, applied on commit()
        self._added = Graph()
        self._removed = Graph()
    def get(self, uri):
        """
        Gets an RDFObject for the specified URI.
        """
        if uri not in self._rdfObjects:
            self._load(uri)
            self._rdfObjects[uri] = RDFObject(uri, self)
        return self._rdfObjects[uri]
    def _load(self, uri):
        """
        This method ensures that the data for a uri is loaded into
        the local graph.
        """
        # Fetch both directions: statements about uri and statements
        # pointing at uri.
        if uri not in self._rdfObjects:
            self._connection.query(
                "construct { <" + uri + "> ?p ?o . " +
                "?rs ?rp <" + uri + "> .} where { " +
                "OPTIONAL { <" + uri + "> ?p ?o } " +
                "OPTIONAL { ?rs ?rp <" + uri + "> } }", self._graph)
    def _subjects(self, prop, uri):
        """
        Retrieves all subjects for a property and object URI.
        """
        # Remote triples minus pending removals, plus pending additions.
        for triple in self._graph.triples((None, prop, uri)):
            if triple not in self._removed:
                yield triple[0]
        for triple in self._added.triples((None, prop, uri)):
            yield triple[0]
    def _objects(self, uri, prop):
        """
        Retrieves all objects for a subject URI and property.
        """
        for triple in self._graph.triples((uri, prop, None)):
            if triple not in self._removed:
                yield triple[2]
        for triple in self._added.triples((uri, prop, None)):
            yield triple[2]
    def _predicates(self, subject=None, object=None):
        """
        Retrieves all unique predicates for a subject or object URI.
        """
        result = set()
        for triple in self._graph.triples((subject, None, object)):
            if triple not in self._removed:
                result.add(triple[1])
        for triple in self._added.triples((subject, None, object)):
            result.add(triple[1])
        return result
    def _setSubjects(self, values, prop, uri):
        """
        Sets all subjects for a property and uri.
        """
        # Diff against the remote graph: queue removals for dropped
        # values and additions for new ones.
        newValues = set(values)
        existingValues = set(self._graph.subjects(prop, uri))
        for value in existingValues - newValues:
            removed = (value, prop, uri)
            self._added.remove(removed)
            self._removed.add(removed)
        for value in newValues - existingValues:
            added = (value, prop, uri)
            self._removed.remove(added)
            self._added.add(added)
    def _setObjects(self, uri, prop, values):
        """
        Sets all objects for a uri and property.
        """
        newValues = set(values)
        existingValues = set(self._graph.objects(uri, prop))
        for value in existingValues - newValues:
            removed = (uri, prop, value)
            self._added.remove(removed)
            self._removed.add(removed)
        for value in newValues - existingValues:
            added = (uri, prop, value)
            self._removed.remove(added)
            self._added.add(added)
    def commit(self):
        """
        Commits changes to the remote graph and flushes local caches.
        """
        self._connection.update(add=self._added, remove=self._removed)
        self._rdfObjects = {}
        self._graph = Graph()
        self._added = Graph()
        self._removed = Graph()
class RDFObject:
    """
    The RDFObject wraps an RDF URI and automatically retrieves property values
    as they are referenced as object attributes.

    An "r_"-prefixed name walks statements in reverse (subjects that
    point at this uri).  Attribute assignment replaces all values of
    the property in the underlying RDFObjectGraph.
    """
    def __init__(self, uri, objectGraph):
        # Assign via __dict__ to avoid triggering __setattr__, which
        # would try to write RDF statements.
        self.__dict__["uri"] = uri
        self.__dict__["_objectGraph"] = objectGraph
    def __repr__(self):
        return "<RDFObject " + self.uri + ">"
    def __str__(self):
        return self.uri
    def __getattr__(self, name):
        # Resolve the simplified name via the ontology, then read the
        # values in the forward or reverse direction.
        self._objectGraph._load(self.uri)
        prop = self._getProp(name)
        if name.startswith("r_"):
            values = self._objectGraph._subjects(prop, self.uri)
        else:
            values = self._objectGraph._objects(self.uri, prop)
        results = self._wrapResults(values)
        return results
    def __setitem__(self, name, values):
        self.__setattr__(name,values)
    def __setattr__(self, name, values):
        self._objectGraph._load(self.uri)
        unwrappedValues = []
        for value in values:
            # unwrap rdfobjects:
            if isinstance(value, RDFObject):
                unwrappedValues.append(value.uri)
            # pass through rdflib objects:
            elif isinstance(value, URIRef) or isinstance(value, BNode) or isinstance(value, Literal):
                unwrappedValues.append(value)
            # wrap literals:
            else:
                unwrappedValues.append(Literal(value))
        # look for a property mapping for this name:
        prop = self._getProp(name)
        if name.startswith("r_"):
            self._objectGraph._setSubjects(unwrappedValues, prop, self.uri)
        else:
            self._objectGraph._setObjects(self.uri, prop, unwrappedValues)
    def _getProp(self, name):
        # Map a simplified name to a property URI using the ontology
        # property maps of this object's rdf:type(s).
        if name == "type": return rdfType
        for type in self._objectGraph._objects(self.uri, rdfType):
            propertyMap = self._objectGraph._ontology.propertyMaps[type]
            if name in propertyMap: return propertyMap[name]
        raise AttributeError("Unknown property '" + name + "'")
    def __getitem__(self, key):
        # Dictionary-style access resolves the name by scanning actual
        # predicates instead of the ontology maps.
        self._objectGraph._load(self.uri)
        # iterate over predicates and look for a matching name:
        reverse = key.startswith("r_")
        if reverse:
            preds = self._objectGraph._predicates(object=self.uri)
            name = key[2:]
        else:
            preds = self._objectGraph._predicates(subject=self.uri)
            name = key
        for pred in preds:
            if self._objectGraph._ontology.getSimplifiedName(pred) == name:
                if reverse:
                    values = self._objectGraph._subjects(pred, self.uri)
                else:
                    values = self._objectGraph._objects(self.uri, pred)
                return self._wrapResults(values)
        raise KeyError("Property '" + key + "' not found")
    def _wrapResults(self, results):
        # Literals pass through; URIs are wrapped as (cached) RDFObjects.
        ret = []
        for result in results:
            if isinstance(result, Literal): ret.append(result)
            else: ret.append(self._objectGraph.get(result))
        return ret
if __name__ == "__main__":
    # Demo script: exercises the RDFObject API against a local Sesame
    # store preloaded with the film data (network side effects; Py2).
    sc = SesameConnection("localhost:8080", "semprog")
    factory = RDFObjectGraphFactory(sc)
    objectGraph = factory.createGraph()
    filmNs = Namespace("http://www.semprog.com/film#")
    bladerunner = objectGraph.get(filmNs["blade_runner"])
    harrisonford = objectGraph.get(filmNs["harrison_ford"])
    # Attribute-style reads (ontology-mapped property names).
    print bladerunner.type
    print bladerunner.name[0]
    print bladerunner.starring[0].has_actor[0].name[0]
    print bladerunner.starring[0].has_actor[0].r_has_actor[0].r_starring
    print harrisonford.name[0]
    print harrisonford.r_has_actor[0].r_starring
    # Dictionary-style reads (predicate-scanning lookup).
    print bladerunner["name"][0]
    print bladerunner["starring"][0]["has_actor"][0]["name"][0]
    # Mutate, commit, and revert a property value.
    names = bladerunner.name
    names.append("Do Androids Dream of Electric Sheep?")
    bladerunner.name = names
    print bladerunner.name
    objectGraph.commit()
    print bladerunner.name
    bladerunner.name = ["Blade Runner"]
    objectGraph.commit()
    print bladerunner.name
    # Create new resources and link them up.
    raiders = objectGraph.get(filmNs["raiders_of_the_lost_ark"])
    raiders.type = [filmNs["Film"]]
    raiders.name = ["Raiders of the Lost Ark"]
    perf2 = objectGraph.get(filmNs["perf2"])
    perf2.type = [filmNs["Performance"]]
    indy = objectGraph.get(filmNs["indy"])
    indy.type = [filmNs["Role"]]
    indy.name = ["Indiana Jones"]
    perf2.r_starring = [raiders, bladerunner]
    perf2.has_actor = [harrisonford]
    perf2.has_role = [indy]
    objectGraph.commit()
    print indy.name
    print raiders.name
    perf2.r_starring = [raiders]
    objectGraph.commit()
    print perf2.r_starring
    print raiders.starring[0].has_actor[0].uri
    print harrisonford.r_has_actor
| [
[
1,
0,
0.0025,
0.0025,
0,
0.66,
0,
926,
0,
4,
0,
0,
926,
0,
0
],
[
1,
0,
0.005,
0.0025,
0,
0.66,
0.0417,
752,
0,
1,
0,
0,
752,
0,
0
],
[
1,
0,
0.0074,
0.0025,
0,
0... | [
"from rdflib import Namespace, URIRef, Literal, BNode",
"from rdflib.graph import Graph",
"from urllib import quote_plus",
"from httplib import HTTPConnection",
"from cStringIO import StringIO",
"import xml.dom",
"from simplejson import loads",
"owlNS = Namespace(\"http://www.w3.org/2002/07/owl#\")",
... |
import urllib2
from urllib import quote_plus
from simplejson import loads
from httplib import HTTPConnection
import urlparse
class connection:
    """Minimal HTTP client for an OpenRDF Sesame endpoint.

    ``url`` is the base endpoint, e.g.
    'http://localhost:8080/openrdf-sesame/'.  PREFIX declarations
    accumulated with addnamespace() are prepended to every query.
    """
    def __init__(self,url):
        self.baseurl=url
        self.sparql_prefix=""
        self.host=urlparse.urlparse(url).netloc
    def addnamespace(self,id,ns):
        # Accumulate a PREFIX declaration used by all later queries.
        self.sparql_prefix+='PREFIX %s:<%s>\n' % (id,ns)
    def __getsparql__(self,method):
        # GET a path relative to baseurl and decode the JSON SPARQL
        # result; on decode failure the raw payload is wrapped in an
        # 'error' binding instead of raising.
        req=urllib2.Request(self.baseurl+method,
            headers={'Accept':'application/sparql-results+json'})
        data=urllib2.urlopen(req)
        data=data.read()
        try:
            result=loads(data)['results']['bindings']
            return result
        except:
            return [{'error':data}];
    def repositories(self):
        """List the repositories available on the server."""
        return self.__getsparql__('repositories')
    def use_repository(self,r):
        # Select the repository used by subsequent query/post calls.
        self.repository=r
    def query(self,q):
        """Run a SPARQL query and return the JSON result bindings."""
        q='repositories/'+self.repository+'?query='+quote_plus(self.sparql_prefix+q)
        return self.__getsparql__(q)
    def construct_query(self,q):
        """Run a CONSTRUCT query and return the raw response body."""
        q='repositories/'+self.repository+'?query='+quote_plus(self.sparql_prefix+q)
        data=urllib2.urlopen(urllib2.Request(self.baseurl+q,headers={'Accept':'application/sparql-results+json'})).read()
        return data
    def postdata(self,data,context=None):
        """POST RDF/XML statements into the repository.

        NOTE(review): baseurl is joined with a leading '/repositories'
        here (unlike query(), which uses a relative path) — with a
        trailing-slash baseurl this yields a double slash; confirm the
        server tolerates it."""
        host=self.baseurl+'/repositories/'+self.repository+'/statements'
        if context:
            host += '?context=' + quote_plus(context)
        res=urllib2.urlopen(urllib2.Request(host,data,{'Content-Type':'application/rdf+xml;charset=UTF-8'})).read()
        return res
    def __request__(self, method, path, data, headers):
        """Perform one raw HTTP request; raises on any status other
        than 200 or 204 and returns the response body."""
        conn = HTTPConnection(self.host)
        conn.request(method, path, data, headers)
        response = conn.getresponse()
        if response.status != 200 and response.status != 204:
            raise Exception("Sessame connection error " +
                            str(response.status) + " " + response.reason)
        response = response.read()
        conn.close()
        return response
    def putdata(self,data,context=None):
        """PUT RDF/XML statements, replacing repository contents."""
        host=self.baseurl+'/repositories/'+self.repository+'/statements'
        if context:
            host += '?context=' + quote_plus(context)
        return self.__request__("PUT", host, data,
                                {'Content-Type':
                                 'application/rdf+xml;charset=UTF-8'})
if __name__=='__main__':
    # Demo: find every co-star of John Malkovich in a local Sesame
    # 'Movies' repository (network side effects; Py2 print statements).
    c=connection('http://localhost:8080/openrdf-sesame/')
    c.use_repository('Movies')
    c.addnamespace('fb','http://rdf.freebase.com/ns/')
    c.addnamespace('dc','http://purl.org/dc/elements/1.1/')
    res=c.query("""SELECT ?costar ?fn WHERE {?film fb:film.film.performances ?p1 .
            ?film dc:title ?fn . 
            ?p1 fb:film.performance.actor ?a1 .
            ?a1 dc:title "John Malkovich".
            ?film fb:film.film.performances ?p2 .
            ?p2 fb:film.performance.actor ?a2 . 
            ?a2 dc:title ?costar .}""")
    for r in res:
        print r
[
1,
0,
0.0118,
0.0118,
0,
0.66,
0,
345,
0,
1,
0,
0,
345,
0,
0
],
[
1,
0,
0.0235,
0.0118,
0,
0.66,
0.1667,
614,
0,
1,
0,
0,
614,
0,
0
],
[
1,
0,
0.0353,
0.0118,
0,
... | [
"import urllib2",
"from urllib import quote_plus",
"from simplejson import loads",
"from httplib import HTTPConnection",
"import urlparse",
"class connection:\n def __init__(self,url):\n self.baseurl=url\n self.sparql_prefix=\"\"\n self.host=urlparse.urlparse(url).netloc\n \n ... |
# -*- coding: utf-8 -*-
###########################################################################
## Python code generated with wxFormBuilder (version Sep 8 2010)
## http://www.wxformbuilder.org/
##
## PLEASE DO "NOT" EDIT THIS FILE!
###########################################################################
import wx
# NOTE(review): wxFormBuilder emits these window/control ids as plain
# int attributes monkey-patched onto the wx module itself; they are
# referenced below when constructing the frame.
wx.ID_Window = 1000
wx.ID_Window_StatusBar = 1001
wx.ID_Window_MenuBar = 1002
wx.ID_Window_Quit = 1003
wx.ID_Window_SplitterWindow_LeftPanel = 1004
###########################################################################
## Class Window
###########################################################################
class Window ( wx.Frame ):
    """Main application frame: menu bar, status bar, and a splitter
    with a grey canvas panel on the left and a column of buttons
    (m_button38 .. m_button58) on the right.

    Behaviour-preserving rewrite of wxFormBuilder output; the twenty
    repetitive button blocks are collapsed into a loop that assigns
    the same attribute names.
    """

    # Splitter sash position pinned on first idle event.
    _SASH_POS = 486

    def __init__( self, parent ):
        wx.Frame.__init__ ( self, parent, id = wx.ID_Window, title = u"Klein", pos = wx.DefaultPosition, size = wx.Size( 705,238 ), style = wx.DEFAULT_FRAME_STYLE|wx.TAB_TRAVERSAL )
        self.SetSizeHintsSz( wx.DefaultSize, wx.DefaultSize )
        self.mStatusBar = self.CreateStatusBar( 1, wx.ST_SIZEGRIP, wx.ID_Window_StatusBar )
        # Menu bar with a single File -> Quit entry.
        self.mMenuBar = wx.MenuBar( 0 )
        self.mFile = wx.Menu()
        self.mQuit = wx.MenuItem( self.mFile, wx.ID_Window_Quit, u"Quit", wx.EmptyString, wx.ITEM_NORMAL )
        self.mFile.AppendItem( self.mQuit )
        self.mMenuBar.Append( self.mFile, u"File" )
        self.SetMenuBar( self.mMenuBar )
        mSizer = wx.BoxSizer( wx.HORIZONTAL )
        self.mSplitterWindow = wx.SplitterWindow( self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.SP_3D )
        self.mSplitterWindow.Bind( wx.EVT_IDLE, self.mSplitterWindowOnIdle )
        # Left pane: grey canvas panel inside a vertical sizer.
        self.mLeftPanel = wx.Panel( self.mSplitterWindow, wx.ID_Window_SplitterWindow_LeftPanel, wx.DefaultPosition, wx.DefaultSize, 0 )
        mRightSizer = wx.BoxSizer( wx.VERTICAL )
        self.mCanvasPanel = wx.Panel( self.mLeftPanel, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, 0 )
        self.mCanvasPanel.SetBackgroundColour( wx.Colour( 128, 128, 128 ) )
        mRightSizer.Add( self.mCanvasPanel, 1, wx.EXPAND |wx.ALL, 5 )
        self.mLeftPanel.SetSizer( mRightSizer )
        self.mLeftPanel.Layout()
        mRightSizer.Fit( self.mLeftPanel )
        # Right pane: vertical run of buttons m_button38..m_button58.
        self.mRightPanel = wx.Panel( self.mSplitterWindow, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.VSCROLL )
        mLeftSizer = wx.BoxSizer( wx.VERTICAL )
        # First and last buttons carry special labels; the rest keep
        # the wxFormBuilder default.
        special_labels = {38: u"1", 58: u"-1"}
        for num in range(38, 59):
            button = wx.Button( self.mRightPanel, wx.ID_ANY, special_labels.get(num, u"MyButton"), wx.DefaultPosition, wx.DefaultSize, 0 )
            setattr(self, "m_button%d" % num, button)
            mLeftSizer.Add( button, 0, wx.ALL, 5 )
        self.mRightPanel.SetSizer( mLeftSizer )
        self.mRightPanel.Layout()
        mLeftSizer.Fit( self.mRightPanel )
        self.mSplitterWindow.SplitVertically( self.mLeftPanel, self.mRightPanel, self._SASH_POS )
        mSizer.Add( self.mSplitterWindow, 1, wx.EXPAND, 5 )
        self.SetSizer( mSizer )
        self.Layout()
        self.Centre( wx.BOTH )

    def __del__( self ):
        pass

    def mSplitterWindowOnIdle( self, event ):
        # One-shot handler: pin the sash position, then stop listening.
        self.mSplitterWindow.SetSashPosition( self._SASH_POS )
        self.mSplitterWindow.Unbind( wx.EVT_IDLE )
app = wx.App()
win = Window(None)
win.Show(True)
app.MainLoop()
| [
[
1,
0,
0.0704,
0.007,
0,
0.66,
0,
666,
0,
1,
0,
0,
666,
0,
0
],
[
14,
0,
0.0845,
0.007,
0,
0.66,
0.1,
180,
1,
0,
0,
0,
0,
1,
0
],
[
14,
0,
0.0915,
0.007,
0,
0.66,
... | [
"import wx",
"wx.ID_Window = 1000",
"wx.ID_Window_StatusBar = 1001",
"wx.ID_Window_MenuBar = 1002",
"wx.ID_Window_Quit = 1003",
"wx.ID_Window_SplitterWindow_LeftPanel = 1004",
"class Window ( wx.Frame ):\n\t\n\tdef __init__( self, parent ):\n\t\twx.Frame.__init__ ( self, parent, id = wx.ID_Window, title... |
#!/usr/bin/python
# -*- coding: UTF-8 -*-
# vim:set shiftwidth=4 tabstop=4 expandtab textwidth=79:
# Author: Huang Jiahua <jhuangjiahua@gmail.com>
# License: LGPLv3+
# Last modified:
'''文档格式转换
'''
import os
import subprocess
def doc2html(docfile):
'''将 mso doc 转换为 html
依赖 wv
'''
dir = os.tmpnam()
dir = dir.replace('file', 'gwrite-%s/file' % os.getlogin() )
html = 'gwrite.html'
os.makedirs(dir)
subprocess.Popen(['wvHtml', '--targetdir=%s'%dir, docfile, html]).wait()
return dir + '/' + html
| [
[
8,
0,
0.3542,
0.0833,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.4583,
0.0417,
0,
0.66,
0.3333,
688,
0,
1,
0,
0,
688,
0,
0
],
[
1,
0,
0.5,
0.0417,
0,
0.66,
... | [
"'''文档格式转换\n'''",
"import os",
"import subprocess",
"def doc2html(docfile):\n '''将 mso doc 转换为 html\n 依赖 wv\n '''\n dir = os.tmpnam()\n dir = dir.replace('file', 'gwrite-%s/file' % os.getlogin() )\n html = 'gwrite.html'\n os.makedirs(dir)",
" '''将 mso doc 转换为 html\n 依赖 wv\n '''... |
#!/usr/bin/python
# -*- coding: UTF-8 -*-
'''WebkitLinkView
@author: U{Jiahua Huang <jhuangjiahua@gmail.com>}
@license: LGPLv3+
'''
import gobject
import gtk
import webkit
import re
def proc(html):
"""处理 html 链接
>>> proc(' <a href="#3.2.1">3.2.1 heading</a>')
'<a href="#3.2.1" onDblClick="window.location.href=\'+\'+this.href;" onMouseOver="document.title=this.href;" > 3.2.1 heading</a>'
"""
return re.sub('( *)(.*?)(>)(.*)',
'''\\2 onDblClick="window.location.href='+'+this.href;" onMouseOver="document.title=this.href;" \\3\\1\\4''',
html)
def stastr(stri):
'''处理字符串的 ' "
'''
return stri.replace("\\","\\\\").replace(r'"',r'\"').replace(r"'",r"\'").replace('\n',r'\n')
class LinkTextView(webkit.WebView):
#__gtype_name__ = 'LinkTextView'
__gsignals__ = {
'url-clicked': (gobject.SIGNAL_RUN_LAST, None, (str, str)), # href, type
}
def __init__(self):
webkit.WebView.__init__(self)
self.connect("navigation-requested", self.on_navigation_requested)
#self.connect_after("populate-popup", lambda view, menu: menu.destroy()) # 暂时禁止右键菜单
self.set_property('can-focus', False)
pass
def updatehtmllinks(self, html):
self.load_html_string('''<html>
<head>
<style>
a:hover {
font-weight: bold;
border-bottom: 1px solid blue;
}
a {
width: 90%%;
text-decoration: none ;
white-space: pre;
display: block;
}
</style>
</head>
<body>
<code>%s</code>
</body>
</html>''' % proc(html), '') # 首次执行时还没 document.body 对象
self.updatehtmllinks = lambda html : self.execute_script('document.body.innerHTML="<code>%s</code>";' % stastr(proc(html))) # 保持滚动条位置
pass
def on_navigation_requested(self, widget, WebKitWebFrame, WebKitNetworkRequest):
href = WebKitNetworkRequest.get_uri()
if '#' in href:
self.emit("url-clicked", href, "link")
pass
return True
if __name__=="__main__":
main()
| [
[
8,
0,
0.0616,
0.0548,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.1096,
0.0137,
0,
0.66,
0.125,
90,
0,
1,
0,
0,
90,
0,
0
],
[
1,
0,
0.1233,
0.0137,
0,
0.66,
... | [
"'''WebkitLinkView\n@author: U{Jiahua Huang <jhuangjiahua@gmail.com>}\n@license: LGPLv3+\n'''",
"import gobject",
"import gtk",
"import webkit",
"import re",
"def proc(html):\n \"\"\"处理 html 链接\n\n >>> proc(' <a href=\"#3.2.1\">3.2.1 heading</a>')\n '<a href=\"#3.2.1\" onDblClick=\"window.loc... |
#!/usr/bin/python
# -*- coding: UTF-8 -*-
'''Gtk 对话框
@author: U{Jiahua Huang <jhuangjiahua@gmail.com>}
@license: LGPLv3+
'''
import os
import gtk
import gobject
__all__ = ['error', 'info', 'inputbox', 'messagedialog', 'open', 'save', 'warning',
'yesno']
try: import i18n
except: from gettext import gettext as _
def colorbox(title="Changing color", previous_color='', current_color=''):
'''
'''
dialog = gtk.ColorSelectionDialog("Changing color")
colorsel = dialog.colorsel
if current_color:
colorsel.set_previous_color(previous_color)
if current_color:
colorsel.set_current_color(current_color)
colorsel.set_has_palette(True)
response = dialog.run()
htmlcolor = ''
if response == gtk.RESPONSE_OK:
color = colorsel.get_current_color()
rgb = (color.red, color.green, color.blue)
htmlcolor = '#' + ''.join((str(hex(i/257))[2:].rjust(2, '0') for i in rgb))
dialog.destroy()
return htmlcolor
def textbox(title='Text Box', label='Text',
parent=None, text=''):
"""display a text edit dialog
return the text , or None
"""
dlg = gtk.Dialog(title, parent, gtk.DIALOG_DESTROY_WITH_PARENT,
(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
gtk.STOCK_OK, gtk.RESPONSE_OK ))
dlg.set_default_size(500,500)
#lbl = gtk.Label(label)
#lbl.set_alignment(0, 0.5)
#lbl.show()
#dlg.vbox.pack_start(lbl, False)
gscw = gtk.ScrolledWindow()
gscw.set_shadow_type(gtk.SHADOW_IN)
#gscw.set_policy(gtk.POLICY_NEVER, gtk.POLICY_AUTOMATIC)
gscw.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
textview=gtk.TextView(buffer=None)
buffer = textview.get_buffer()
if text:buffer.set_text(text)
#textview.show()
gscw.add(textview)
#gscw.show()
dlg.vbox.pack_start(gscw)
dlg.show_all()
resp = dlg.run()
text=buffer.get_text(buffer.get_start_iter(),buffer.get_end_iter())
dlg.destroy()
if resp == gtk.RESPONSE_OK:
return text
return None
def combobox(title='ComboBox', label='ComboBox', parent=None, texts=['']):
'''dialog with combobox
'''
dlg = gtk.Dialog(title, parent, gtk.DIALOG_DESTROY_WITH_PARENT,
(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
gtk.STOCK_OK, gtk.RESPONSE_OK ))
label1 = gtk.Label(label)
label1.set_alignment(0, 0.5)
label1.set_padding(5, 5)
label1.set_line_wrap(True)
label1.show()
dlg.vbox.pack_start(label1, False, False, 0)
combobox1_List = gtk.ListStore(gobject.TYPE_STRING)
combobox1 = gtk.ComboBox()
combobox1.show()
#combobox1_List.append(["1122"])
combobox1.set_model(combobox1_List)
cell = gtk.CellRendererText()
combobox1.pack_start(cell, True)
combobox1.add_attribute(cell, 'text', 0)
dlg.vbox.pack_start(combobox1, True, True, 0)
for i in texts:
combobox1.append_text(i)
combobox1.set_active(0)
resp = dlg.run()
t = combobox1.get_active()
text = texts[t]
dlg.destroy()
if resp == gtk.RESPONSE_CANCEL:
return None
return text
def spinbox2(title='2 Spin Box', label1='value1:', label2='value2:',
parent=None, value1=3, value2=3):
"""dialog with 2 spin buttons
return (value1,value2) , or ()
"""
dlg = gtk.Dialog(title, parent, gtk.DIALOG_DESTROY_WITH_PARENT,
(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
gtk.STOCK_OK, gtk.RESPONSE_OK ))
lbl = gtk.Label(title)
lbl.set_alignment(0, 0.5)
dlg.vbox.pack_start(lbl, False)
#vbox1 = gtk.VBox(False, 0)
#vbox1.show()
#vbox1.set_spacing(0)
table2 = gtk.Table()
table2.show()
table2.set_row_spacings(0)
table2.set_col_spacings(0)
label1 = gtk.Label(label1)
label1.set_alignment(0, 0.5)
label1.set_padding(0, 0)
label1.set_line_wrap(False)
label1.show()
table2.attach(label1, 0, 1, 0, 1, gtk.FILL, 0, 0, 0)
label2 = gtk.Label(label2)
label2.set_alignment(0, 0.5)
label2.set_padding(0, 0)
label2.set_line_wrap(False)
label2.show()
table2.attach(label2, 0, 1, 1, 2, gtk.FILL, 0, 0, 0)
adj = gtk.Adjustment(1.0, 1.0, 512.0, 1.0, 5.0, 0.0)
spin1=gtk.SpinButton(adj,0,0)
if value1: spin1.set_value(value1)
table2.attach(spin1, 1, 2, 0, 1, gtk.EXPAND|gtk.FILL, 0, 0, 0)
adj2 = gtk.Adjustment(1.0, 1.0, 512.0, 1.0, 5.0, 0.0)
spin2=gtk.SpinButton(adj2,0,0)
if value2: spin2.set_value(value2)
table2.attach(spin2, 1, 2, 1, 2, gtk.EXPAND|gtk.FILL, 0, 0, 0)
#vbox1.pack_start(table2, True, True, 0)
dlg.vbox.pack_start(table2)
dlg.show_all()
resp = dlg.run()
value1=spin1.get_value()
value2=spin2.get_value()
dlg.hide()
if resp == gtk.RESPONSE_CANCEL:
return ()
return (value1,value2)
def inputbox(title='Input Box', label='Please input the value',
parent=None, text=''):
"""dialog with a input entry
return text , or None
"""
#@TODO: 要直接回车确定
dlg = gtk.Dialog(title, parent, gtk.DIALOG_DESTROY_WITH_PARENT,
(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
gtk.STOCK_OK, gtk.RESPONSE_OK ))
lbl = gtk.Label(label)
lbl.set_alignment(0, 0.5)
lbl.show()
dlg.vbox.pack_start(lbl)
entry = gtk.Entry()
if text: entry.set_text(text)
entry.show()
dlg.vbox.pack_start(entry, False)
dlg.set_default_response(gtk.RESPONSE_OK)
resp = dlg.run()
text = entry.get_text()
dlg.hide()
if resp == gtk.RESPONSE_CANCEL:
return None
return text
def inputbox2(title='2 Input Box', label1='value1:', label2='value2:',
parent=None, text1='', text2=''):
"""dialog with 2 input buttons
return (text1,text2) , or ()
"""
strlabel2 = label2
dlg = gtk.Dialog(title, parent, gtk.DIALOG_DESTROY_WITH_PARENT,
(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
gtk.STOCK_OK, gtk.RESPONSE_OK ))
lbl = gtk.Label(title)
lbl.set_alignment(0, 0.5)
dlg.vbox.pack_start(lbl, False)
table1 = gtk.Table()
table1.show()
table1.set_row_spacings(0)
table1.set_col_spacings(0)
label2 = gtk.Label(label1)
label2.set_alignment(0.5, 0.5)
label2.set_padding(0, 0)
label2.set_line_wrap(False)
label2.show()
table1.attach(label2, 0, 1, 0, 1, gtk.FILL, 0, 0, 0)
entry2 = gtk.Entry()
entry2.set_text("")
entry2.set_editable(True)
entry2.show()
entry2.set_visibility(True)
table1.attach(entry2, 1, 2, 0, 1, gtk.EXPAND|gtk.FILL, 0, 0, 0)
label3 = gtk.Label(strlabel2)
label3.set_alignment(0, 0.5)
label3.set_padding(0, 0)
label3.set_line_wrap(False)
label3.show()
table1.attach(label3, 0, 1, 1, 2, gtk.FILL, 0, 0, 0)
entry3 = gtk.Entry()
entry3.set_text("")
entry3.set_editable(True)
entry3.show()
entry3.set_visibility(True)
table1.attach(entry3, 1, 2, 1, 2, gtk.EXPAND|gtk.FILL, 0, 0, 0)
if text1: entry2.set_text(text1)
if text2: entry3.set_text(text2)
dlg.vbox.pack_start(table1)
dlg.set_default_response(gtk.RESPONSE_OK)
dlg.show_all()
resp = dlg.run()
text1 = entry2.get_text()
text2 = entry3.get_text()
dlg.hide()
if resp == gtk.RESPONSE_CANCEL:
return ()
return (text1,text2)
def savechanges(text=_("Save Changes?"), parent=None):
'''Save Changes?
return 1, -1, 0 => yes, no, cancel
'''
d = gtk.MessageDialog(parent=parent, flags=gtk.DIALOG_MODAL,
type=gtk.MESSAGE_INFO,)
d.add_buttons(gtk.STOCK_YES, 1, gtk.STOCK_NO, -1, gtk.STOCK_CANCEL, 0)
d.set_markup(text)
d.show_all()
response = d.run()
d.destroy()
return response
def infotablebox(title=_("Info"), short=_("Info"), info=[[_("Key:"), _("Value")]], parent=None):
'''show info table box
'''
dlg = gtk.Dialog(title, parent, gtk.DIALOG_DESTROY_WITH_PARENT,
(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
gtk.STOCK_OK, gtk.RESPONSE_OK ))
label = gtk.Label()
label.set_markup(short)
label.set_padding(20, 10)
label.set_alignment(0, 0)
label.show()
dlg.vbox.pack_start(label, False, False, 0)
##
table = gtk.Table()
table.show()
# table
y = 0
for line in info:
x = 0
left = 0
for text in line:
label = gtk.Label()
#label.set_selectable(1) # 会干扰编辑区选中状态
label.set_padding(10, 3)
label.set_alignment(left, 0)
label.set_markup("%s" % text)
label.show()
table.attach(label, x, x+1, y, y+1,)
x += 1
left = 1
pass
y += 1
pass
dlg.vbox.pack_start(table, False, False, 5)
response = dlg.run()
dlg.destroy()
return response
def messagedialog(dialog_type, short, long=None, parent=None,
buttons=gtk.BUTTONS_OK, additional_buttons=None):
d = gtk.MessageDialog(parent=parent, flags=gtk.DIALOG_MODAL,
type=dialog_type, buttons=buttons)
if additional_buttons:
d.add_buttons(*additional_buttons)
d.set_markup(short)
if long:
if isinstance(long, gtk.Widget):
widget = long
elif isinstance(long, basestring):
widget = gtk.Label()
widget.set_markup(long)
else:
raise TypeError("long must be a gtk.Widget or a string")
expander = gtk.Expander(_("Click here for details"))
expander.set_border_width(6)
expander.add(widget)
d.vbox.pack_end(expander)
d.show_all()
response = d.run()
d.destroy()
return response
def error(short, long=None, parent=None):
"""Displays an error message."""
return messagedialog(gtk.MESSAGE_ERROR, short, long, parent)
def info(short, long=None, parent=None):
"""Displays an info message."""
return messagedialog(gtk.MESSAGE_INFO, short, long, parent)
def warning(short, long=None, parent=None):
"""Displays a warning message."""
return messagedialog(gtk.MESSAGE_WARNING, short, long, parent)
def yesno(text="OK ?", parent=None):
"""
return 1 or 0 . ( yes/no )
"""
## return messagedialog(gtk.MESSAGE_WARNING, text, None, parent,
## buttons=gtk.BUTTONS_YES_NO)
i = messagedialog(gtk.MESSAGE_INFO, text, None, parent,
buttons=gtk.BUTTONS_YES_NO)
if i == -8:
return 1
return 0
def open(title='', parent=None,
patterns=[], mimes=[], name_mimes=[], name_patterns=[], folder=None):
"""Displays an open dialog.
return the full path , or None"""
filechooser = gtk.FileChooserDialog(title or _('Open'),
parent,
gtk.FILE_CHOOSER_ACTION_OPEN,
(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
gtk.STOCK_OPEN, gtk.RESPONSE_OK))
if patterns:
file_filter = gtk.FileFilter()
for pattern in patterns:
file_filter.add_pattern(pattern)
filechooser.set_filter(file_filter)
pass
if mimes:
file_filter = gtk.FileFilter()
for mime in mimes:
file_filter.add_mime_type(mime)
filechooser.add_filter(file_filter)
pass
if name_mimes:
for name, mime in name_mimes:
file_filter = gtk.FileFilter()
file_filter.set_name(name)
file_filter.add_mime_type(mime)
filechooser.add_filter(file_filter)
if not "*" in [ i[1] for i in name_patterns]:
name_patterns += [[_("All Files"), "*"]]
pass
for name, pattern in name_patterns:
file_filter = gtk.FileFilter()
file_filter.set_name(name)
file_filter.add_pattern(pattern)
filechooser.add_filter(file_filter)
filechooser.set_default_response(gtk.RESPONSE_OK)
if folder:
filechooser.set_current_folder(folder)
response = filechooser.run()
if response != gtk.RESPONSE_OK:
filechooser.destroy()
return
path = filechooser.get_filename()
if path and os.access(path, os.R_OK):
filechooser.destroy()
return path
abspath = os.path.abspath(path)
error(_('Could not open file "%s"') % abspath,
_('The file "%s" could not be opened. '
'Permission denied.') % abspath)
filechooser.destroy()
return path
def save(title='', parent=None, current_name='',
patterns=[], mimes=[], name_mimes=[], name_patterns=[], folder=None):
"""Displays a save dialog.
return the full path , or None
"""
filechooser = gtk.FileChooserDialog(title or _('Save'),
parent,
gtk.FILE_CHOOSER_ACTION_SAVE,
(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
gtk.STOCK_SAVE, gtk.RESPONSE_OK))
if patterns:
file_filter = gtk.FileFilter()
for pattern in patterns:
file_filter.add_pattern(pattern)
filechooser.set_filter(file_filter)
pass
if mimes:
file_filter = gtk.FileFilter()
for mime in mimes:
file_filter.add_mime_type(mime)
filechooser.add_filter(file_filter)
pass
if name_mimes:
for name, mime in name_mimes:
file_filter = gtk.FileFilter()
file_filter.set_name(name)
file_filter.add_mime_type(mime)
filechooser.add_filter(file_filter)
if not "*" in [ i[1] for i in name_patterns]:
name_patterns += [[_("All Files"), "*"]]
pass
for name, pattern in name_patterns:
file_filter = gtk.FileFilter()
file_filter.set_name(name)
file_filter.add_pattern(pattern)
filechooser.add_filter(file_filter)
if current_name:
filechooser.set_current_name(current_name)
filechooser.set_default_response(gtk.RESPONSE_OK)
if folder:
filechooser.set_current_folder(folder)
path = None
while True:
response = filechooser.run()
if response != gtk.RESPONSE_OK:
path = None
break
path = filechooser.get_filename()
if not os.path.exists(path):
break
submsg1 = _('A file named "%s" already exists') % os.path.abspath(path)
submsg2 = _('Do you which to replace it with the current project?')
text = '<span weight="bold" size="larger">%s</span>\n\n%s\n' % \
(submsg1, submsg2)
result = messagedialog(gtk.MESSAGE_ERROR,
text,
parent=parent,
buttons=gtk.BUTTONS_NONE,
additional_buttons=(gtk.STOCK_CANCEL,
gtk.RESPONSE_CANCEL,
_("Replace"),
gtk.RESPONSE_YES))
# the user want to overwrite the file
if result == gtk.RESPONSE_YES:
break
filechooser.destroy()
return path
def test():
#globals()['_'] = lambda s: s
#-print combobox(title='ComboBox', label='Combo', texts=['11','22','33'])
#-print spinbox2(title='Select the values',label1='cows:',value1=4, label2='rows:',value2=4)
#-print textbox(title='Edit The Text',label='Text',text='test text in textbox')
#-print inputbox(title='Input a Value',label='Input a value')
#-print inputbox2(title='Name and Host',label1='name:',text1='vgh', label2='host:',text2='/')
#print open(title='Open a file', patterns=['*.py'])
#-print open(title='Open a file', name_mimes={"Python Script":"text/x-python"})
#print save(title='Save a file', current_name='foobar.py')
#-print save(title='Save a file', current_name='foobar.py', name_mimes={"Python Script":"text/x-python"})
#-print info(short='This is a InfoBox', long='the long message')
#-print yesno(text='Are you OK?')
#-print savechanges()
error('An error occurred', gtk.Button('Woho'))
error('An error occurred',
'Long description bla bla bla bla bla bla bla bla bla\n'
'bla bla bla bla bla lblabl lablab bla bla bla bla bla\n'
'lbalbalbl alabl l blablalb lalba bla bla bla bla lbal\n')
if __name__ == '__main__':
test()
| [
[
8,
0,
0.0085,
0.0076,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.0152,
0.0019,
0,
0.66,
0.0455,
688,
0,
1,
0,
0,
688,
0,
0
],
[
1,
0,
0.0189,
0.0019,
0,
0.66... | [
"'''Gtk 对话框\n@author: U{Jiahua Huang <jhuangjiahua@gmail.com>}\n@license: LGPLv3+\n'''",
"import os",
"import gtk",
"import gobject",
"__all__ = ['error', 'info', 'inputbox', 'messagedialog', 'open', 'save', 'warning',\n 'yesno']",
"try: import i18n\nexcept: from gettext import gettext as _",
"tr... |
#!/usr/bin/python
# -*- coding: UTF-8 -*-
'''Gtk LaTex
@author: U{Jiahua Huang <jhuangjiahua@gmail.com>}
@license: LGPLv3+
'''
import gtk, gobject
import thread
import time
import subprocess
import os, sys
import base64
try: import gtksourceview2
except: gtksourceview2 = None
try: import i18n
except: from gettext import gettext as _
latex_mark_list = [
# ["+", r" + "],
# ["<big>-</big>", r" - "],
["<b>⋅</b>", r" \cdot "],
["x", r" \times "],
["/", r" / "],
["<big><b>÷</b></big>", r" \frac { } { }"],
["a<sup>n</sup>", r"^{%s}"],
["a<sub>n</sub>", r"_{%s}"],
[" ≠ ", r" \neq "],
[" ≤ ", r" \le "],
[" ≥ ", r" \ge "],
[" ≡ ", r" \equiv "],
[" ≪ ", r" \ll "],
[" ≫ ", r" \gg "],
[" ≃ ", r" \simeq "],
[" ≈ ", r" \approx "],
["√¯", r" \sqrt[] {%s}"],
["∫", r" \int^{}_{} "],
["∬", r" \iint^{}_{} "],
["∮", r" \oint^{}_{} "],
["[ ]", r"\[ %s \]"],
["( )", r"\( %s \)"],
["{ }", r"\{ %s \}"],
["[≡]", r"""
\[
\begin{matrix}
a & b & c\\
c & e & f
\end{matrix}
\]
"""],
["(≡)", r"""
\begin{pmatrix}
a & b & c\\
c & e & f
\end{pmatrix}
"""],
["(<big> : </big>)", r"{ } \choose { } "],
["<big>(</big> x <big>)</big>", r"\left( { %s } \right)"],
[" ± ", r" \pm "],
[" ∓ ", r" \mp "],
[" ∨ ", r" \lor" ],
[" ∧ ", r" \land "],
["mod", r" \bmod "],
[" ∼ ", r" \sim "],
["∥ ", r" \parallel "],
[" ⊥ ", r" \perp "],
["<big><big>∞</big></big>", r" \infty "],
["∠", r" \angle "],
["<big><b>△</b></big>", r" \triangle "],
["∑", r" \sum_{ }^{ } "],
["lim", r"\lim_{ }"],
["⇒", r" \Rightarrow "],
["⇔", r" \Leftrightarrow "],
["∧", r" \wedge "],
["∨", r" \vee "],
["¬", r" \neg "],
["∀", r" \forall "],
["∃", r" \exists "],
["∅", r" \varnothing "],
["∈", r" \in "],
["∉", r" \notin "],
["⊆", r" \subseteq "],
["⊂", r" \subset "],
["∪", r" \cup "],
["⋂", r" \cap "],
["→", r" \to "],
["↦", r" \mapsto "],
["∏", r" \prod "],
["○", r" \circ "],
["sin", r" \sin "],
["cos", r" \cos "],
["tan", r" \tan "],
["ctan", r" \ctab "],
["asin", r" \asin "],
["acos", r" \acos "],
["atan", r" \atan "],
["actan", r" \actan "],
["log", r" \log "],
["ln", r" \ln "],
["...", r" \cdots "],
[" <sub>...</sub> ", r" \ldots "],
["<big>⁝</big>", r" \vdots "],
["<sup>.</sup>.<sub>.</sub>", r" \ddots "],
["α", r" \alpha "],
["β", r" \beta "],
["Γ", r" \Gamma "],
["γ", r" \gamma "],
["Δ", r" \Delta "],
["δ", r" \delta "],
["ϵ", r" \epsilon "],
["ε", r" \varepsilon "],
["ζ", r" \zeta "],
["η", r" \eta "],
["Θ", r" \Theta "],
["θ", r" \theta "],
["ϑ", r" \vartheta "],
["ι", r" \iota "],
["κ", r" \kappa "],
["Λ", r" \Lambda "],
["λ", r" \lambda "],
["μ", r" \mu "],
["ν", r" \nu "],
["Ξ", r" \Xi "],
["ξ", r" \xi "],
["Π", r" \Pi "],
["π", r" \pi "],
["ϖ", r" \varpi "],
["ρ", r" \rho "],
["ϱ", r" \varrho "],
["Σ", r" \Sigma "],
["σ", r" \sigma "],
["ς", r" \varsigma "],
["τ", r" \tau "],
["Υ", r" \Upsilon "],
["υ", r" \upsilon "],
["Φ", r" \Phi "],
["ϕ", r" \phi "],
["φ", r" \varphi "],
["χ", r" \chi "],
["Ψ", r" \Psi "],
["ψ", r" \psi "],
["Ω", r" \Omega "],
["ω", r" \omega "],
]
class GtkToolBoxView(gtk.TextView):
'''流式布局 ToolBox
'''
def __init__(self, latex=""):
'''初始化
'''
self.__gobject_init__()
self.unset_flags(gtk.CAN_FOCUS)
self.set_editable(0)
self.set_wrap_mode(gtk.WRAP_WORD)
self.connect('realize', self.on_realize)
pass
def on_realize(self, *args):
## 将默认 I 形鼠标指针换成箭头
self.get_window(gtk.TEXT_WINDOW_TEXT).set_cursor(gtk.gdk.Cursor(gtk.gdk.ARROW))
pass
def add(self, widget):
'''插入 Widget
'''
buffer = self.get_buffer()
iter = buffer.get_end_iter()
anchor = buffer.create_child_anchor(iter)
buffer.insert(iter, "")
widget.set_data('buffer_anchor', anchor)
self.add_child_at_anchor(widget, anchor)
pass
def remove(self, widget):
'''删除 widget
'''
anchor = widget.get_data('buffer_anchor')
if anchor:
buffer = self.get_buffer()
start = buffer.get_iter_at_child_anchor(anchor)
end = buffer.get_iter_at_offset( start.get_offset() + 1 )
buffer.delete(start, end)
pass
pass
class LatexMathExpressionsEditor(gtk.Table):
'''LaTex 数学公式编辑器
'''
def __init__(self, latex=""):
'''初始化
'''
self.__gobject_init__()
self.set_row_spacings(10)
self.set_col_spacings(10)
## latex edit
scrolledwindow1 = gtk.ScrolledWindow()
scrolledwindow1.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
scrolledwindow1.show()
scrolledwindow1.set_shadow_type(gtk.SHADOW_IN)
if gtksourceview2:
self.latex_textview = gtksourceview2.View()
lm = gtksourceview2.language_manager_get_default()
language = lm.get_language('latex')
buffer = gtksourceview2.Buffer()
buffer.set_highlight_syntax(1)
buffer.set_language(language)
self.latex_textview.set_buffer(buffer)
pass
else:
self.latex_textview = gtk.TextView()
pass
self.latex_textview.set_wrap_mode(gtk.WRAP_WORD)
self.latex_textview.set_cursor_visible(True)
self.latex_textview.set_indent(5)
self.latex_textview.set_editable(True)
self.latex_textview.show()
#self.latex_textview.set_size_request(302, 200)
buffer = self.latex_textview.get_buffer()
buffer.set_text(latex)
scrolledwindow1.add(self.latex_textview)
self.attach(scrolledwindow1, 0, 1, 0, 1)
## latex preview
self.latex_image = gtk.Image()
#self.latex_image.set_size_request(200, 100)
self.latex_image.set_padding(0, 0)
self.latex_image.show()
box = gtk.EventBox()
box.show()
box.modify_bg(gtk.STATE_NORMAL, gtk.gdk.Color("#FFFFFF"))
box.add(self.latex_image)
self.attach(box, 0, 1, 1, 2)
## toolbox
toolview = GtkToolBoxView()
toolview.show()
#toolview.set_size_request(302, 200)
for text, mark in latex_mark_list:
label = gtk.Label()
label.set_markup(text)
label.set_size_request(30, 20)
label.show()
button = gtk.Button()
button.unset_flags(gtk.CAN_FOCUS)
button.add(label)
button.set_relief(gtk.RELIEF_NONE)
button.connect("clicked", self.on_insert_tex_mark, text, mark)
button.set_tooltip_text(mark)
button.show()
toolview.add(button)
pass
scrolledwindow2 = gtk.ScrolledWindow()
#scrolledwindow2.set_size_request(300, 400)
scrolledwindow2.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
scrolledwindow2.show()
scrolledwindow2.set_shadow_type(gtk.SHADOW_IN)
scrolledwindow2.add(toolview)
self.attach(scrolledwindow2, 1, 2, 0, 2)
self.show_all()
thread.start_new_thread(self._up_preview, ())
pass
def get_latex(self, *args):
'''获取 LaTex
'''
buffer = self.latex_textview.get_buffer()
return buffer.get_text(buffer.get_start_iter(),buffer.get_end_iter())
def set_pic(self, data):
'''设置图像
'''
if not data:
return self.latex_image.set_from_stock(gtk.STOCK_DIALOG_ERROR, 2)
pix = gtk.gdk.PixbufLoader()
pix.write(data)
pix.close()
self.latex_image.set_from_pixbuf(pix.get_pixbuf())
return
def _up_preview(self, *args):
'''用于定时更新预览
'''
old_latex = ""
while True:
time.sleep(1)
if not self.get_window():
break
latex = self.get_latex()
if latex == old_latex:
continue
pic = tex2gif(latex, 1)
old_latex = self.get_latex()
if latex == self.get_latex():
gobject.idle_add(self.set_pic, pic)
pass
pass
#-print 'done'
return
def up_preview(self, pic):
'''更新预览'''
return
def insert_latex_mark(self, view, mark, text=""):
'''在 gtk.TextView 插入 LaTex 标记
'''
buffer = view.get_buffer()
bounds = buffer.get_selection_bounds()
select = bounds and buffer.get_slice(bounds[0], bounds[1]) or text
if mark.count("%") == 1:
mark = mark % select
pass
else:
mark = mark + select
pass
buffer.delete_selection(1, 1)
buffer.insert_at_cursor(mark)
pass
def on_insert_tex_mark(self, widget, text, mark):
print 'on_insert_tex_mark:', text, mark
self.insert_latex_mark(self.latex_textview, mark)
pass
def latex_dlg(latex="", title=_("LaTeX math expressions"), parent=None):
dlg = gtk.Dialog(title, parent, gtk.DIALOG_DESTROY_WITH_PARENT,
(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
gtk.STOCK_OK, gtk.RESPONSE_OK ))
dlg.set_default_size(680, 400)
editor = LatexMathExpressionsEditor(latex)
dlg.vbox.pack_start(editor, True, True, 5)
dlg.show_all()
resp = dlg.run()
latex = editor.get_latex()
dlg.destroy()
if resp == gtk.RESPONSE_OK:
return latex
return None
def stastr(stri):
'''处理字符串的 ' "
'''
return stri.replace("\\","\\\\").replace(r'"',r'\"').replace(r"'",r"\'").replace('\n',r'\n')
def tex2gif(tex, transparent=1):
'''将 latex 数学公式转为 gif 图片,依赖 mimetex
mimetex -d -s 7 '公式'
'''
if transparent:
cmd = ['mimetex', '-d', '-s', '4', tex]
pass
else:
cmd = ['mimetex', '-d', '-o', '-s', '4', tex]
pass
i = subprocess.Popen(cmd, stdout=subprocess.PIPE)
gif = i.communicate()[0]
if gif.startswith('GIF'):
return gif
return ""
def gif2base64(gif):
'''将 gif 图像转为 base64 内联图像
'''
return 'data:image/gif;base64,%s' % base64.encodestring(gif).replace('\n', '')
def tex2base64(tex):
'''将 latex 数学公式转为 base64 内联图像
'''
return gif2base64(tex2gif(tex))
def tex2html(tex):
'''将 latex 数学公式转为 base64 内联图像
'''
return '<img alt="mimetex:%s" onDblClick="if(uptex) uptex(this);" style="vertical-align: middle; position: relative; top: -5pt; border: 0;" src="%s" />' % (stastr(tex), gif2base64(tex2gif(tex)))
if __name__=="__main__":
gtk.gdk.threads_init()
latex = ' '.join(sys.argv[1:]) or 'E=MC^2'
latex = latex_dlg(latex)
print latex
#print tex2html(latex)
pass
| [
[
8,
0,
0.0111,
0.0099,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.0198,
0.0025,
0,
0.66,
0.0556,
166,
0,
2,
0,
0,
166,
0,
0
],
[
1,
0,
0.0222,
0.0025,
0,
0.66... | [
"'''Gtk LaTex\n@author: U{Jiahua Huang <jhuangjiahua@gmail.com>}\n@license: LGPLv3+\n'''",
"import gtk, gobject",
"import thread",
"import time",
"import subprocess",
"import os, sys",
"import base64",
"try: import gtksourceview2\nexcept: gtksourceview2 = None",
"try: import gtksourceview2",
"exce... |
#!/usr/bin/python
# -*- coding: UTF-8 -*-
'''GWrite
@author: U{Jiahua Huang <jhuangjiahua@gmail.com>}
@license: LGPLv3+
'''
__version__ = '0.5.1'
import gtk, gobject
import gtkdialogs
import gtklatex
import config
import os, sys
import thread
import re
import urllib2
try: import i18n
except: from gettext import gettext as _
def get_doctitle(html):
title = ''
title = (re.findall(r'''<title>([^\0]*)</title>''', html)+[_("NewDocument")])[0]
return title
def proc_webkit_color(*webviews):
## 设置样式,让 WebKit 背景色使用 Gtk 颜色
style = webviews[0].get_style()
html_bg_color = str(style.base[gtk.STATE_NORMAL])
html_fg_color = str(style.text[gtk.STATE_NORMAL])
user_stylesheet = ''' html {
background-color: %s;
color: %s;\n}''' % (html_bg_color, html_fg_color)
user_stylesheet_file = config.user_stylesheet_file
file(user_stylesheet_file, 'w').write(user_stylesheet)
user_stylesheet_uri = 'file://' + user_stylesheet_file
for webview in webviews:
settings = webview.get_settings()
settings.set_property('user-stylesheet-uri', user_stylesheet_uri)
pass
def menu_find_with_stock(menu, stock):
# 查找菜单中对应 stock 的菜单项位置
n = 0
for i in menu.get_children():
try:
if i.get_image().get_stock()[0] == stock:
return n
except:
pass
n += 1
pass
return -1
Windows = []
new_num = 1
Title = _("GWrite")
## 是否单实例模式
#single_instance_mode = 0
#mdi_mode = 1
class MainWindow:
def __init__(self, editfile='', create = True, accel_group = None):
self.editfile = editfile
## 考虑已经打开文档的情况
if editfile:
for i in Windows:
if i.editfile == editfile:
#-print _('File "%s" already opened') % editfile
i.window.show()
i.window.present()
#@TODO: 让 edit 获得焦点
i.window.grab_focus()
i.edit.grab_focus()
del self
return
pass
pass
##
Windows.append(self)
import webkitedit # 推迟 import webkitedit
##
if accel_group is None:
self.accel_group = gtk.AccelGroup()
else:
self.accel_group = accel_group
if create:
self.window = gtk.Window()
gtk.window_set_default_icon_name("gtk-dnd")
self.window.set_icon_name("gtk-dnd")
self.window.set_default_size(780, 550)
self.window.set_title(Title)
if editfile: self.window.set_title(os.path.basename(self.editfile) + ' - ' + Title)
self.window.add_accel_group(self.accel_group)
self.window.show()
self.window.connect("delete_event", self.on_close)
## 用 Alt-1, Alt-2... 来切换标签页,gtk.gdk.MOD1_MASK 是 Alt
for k in range(1, 10):
self.accel_group.connect_group(gtk.gdk.keyval_from_name(str(k)), gtk.gdk.MOD1_MASK, gtk.ACCEL_VISIBLE, self.on_accel_connect_group)
pass
self.vbox1 = gtk.VBox(False, 0)
self.vbox1.show()
menubar1 = gtk.MenuBar()
menubar1.show()
menuitem_file = gtk.MenuItem(_("_File"))
menuitem_file.show()
menu_file = gtk.Menu()
menu_file.append(gtk.TearoffMenuItem())
self.menu_file = menu_file
menuitem_new = gtk.ImageMenuItem("gtk-new")
menuitem_new.show()
menuitem_new.connect("activate", self.on_new)
menuitem_new.add_accelerator("activate", self.accel_group, gtk.gdk.keyval_from_name("n"), gtk.gdk.CONTROL_MASK, gtk.ACCEL_VISIBLE)
menu_file.append(menuitem_new)
if config.mdi_mode:
menuitem_new_window = gtk.ImageMenuItem(_("New _Window"))
menuitem_new_window.show()
img = gtk.image_new_from_stock(gtk.STOCK_NEW, gtk.ICON_SIZE_MENU)
menuitem_new_window.set_image(img)
menuitem_new_window.connect("activate", self.on_new_window)
menuitem_new_window.add_accelerator("activate", self.accel_group, gtk.gdk.keyval_from_name("n"), gtk.gdk.CONTROL_MASK | gtk.gdk.SHIFT_MASK, gtk.ACCEL_VISIBLE)
menu_file.append(menuitem_new_window)
pass
menuitem_open = gtk.ImageMenuItem("gtk-open")
menuitem_open.show()
menuitem_open.connect("activate", self.on_open)
menuitem_open.add_accelerator("activate", self.accel_group, gtk.gdk.keyval_from_name("o"), gtk.gdk.CONTROL_MASK, gtk.ACCEL_VISIBLE)
menu_file.append(menuitem_open)
menuitem_save = gtk.ImageMenuItem("gtk-save")
menuitem_save.show()
menuitem_save.connect("activate", self.on_save)
menuitem_save.add_accelerator("activate", self.accel_group, gtk.gdk.keyval_from_name("s"), gtk.gdk.CONTROL_MASK, gtk.ACCEL_VISIBLE)
menu_file.append(menuitem_save)
menuitem_save_as = gtk.ImageMenuItem("gtk-save-as")
menuitem_save_as.show()
menuitem_save_as.connect("activate", self.on_save_as)
menu_file.append(menuitem_save_as)
menu_file.append(gtk.MenuItem())
menuitem = gtk.ImageMenuItem("gtk-properties")
menuitem.show()
menuitem.connect("activate", self.on_word_counts)
menu_file.append(menuitem)
menuitem_print = gtk.ImageMenuItem("gtk-print")
menuitem_print.show()
menuitem_print.connect("activate", self.on_print)
menuitem_print.add_accelerator("activate", self.accel_group, gtk.gdk.keyval_from_name("p"), gtk.gdk.CONTROL_MASK, gtk.ACCEL_VISIBLE)
menu_file.append(menuitem_print)
menu_file.append(gtk.MenuItem())
## 最近使用文件菜单 ################
self.recent = gtk.RecentManager()
menu_recent = gtk.RecentChooserMenu(self.recent)
menu_recent.set_limit(25)
#if editfile: self.add_recent(editfile) #改在 new_edit() 里统一添加
##
self.file_filter = gtk.RecentFilter()
self.file_filter.add_mime_type("text/html")
menu_recent.set_filter(self.file_filter)
menu_recent.connect("item-activated", self.on_select_recent)
menuitem_recent = gtk.ImageMenuItem(_("_Recently"))
menuitem_recent.set_image(gtk.image_new_from_icon_name("document-open-recent", gtk.ICON_SIZE_MENU))
menuitem_recent.set_submenu(menu_recent)
menu_file.append(menuitem_recent)
#####################################
menuitem_separatormenuitem1 = gtk.MenuItem()
menuitem_separatormenuitem1.show()
menu_file.append(menuitem_separatormenuitem1)
menuitem_close = gtk.ImageMenuItem("gtk-close")
menuitem_close.show()
menuitem_close.connect("activate", self.close_tab)
menuitem_close.add_accelerator("activate", self.accel_group, gtk.gdk.keyval_from_name("w"), gtk.gdk.CONTROL_MASK, gtk.ACCEL_VISIBLE)
menu_file.append(menuitem_close)
if config.mdi_mode:
menuitem_close_window = gtk.ImageMenuItem(_("Close Win_dow"))
menuitem_close_window.show()
img = gtk.image_new_from_stock(gtk.STOCK_CLOSE, gtk.ICON_SIZE_MENU)
menuitem_close_window.set_image(img)
menuitem_close_window.connect("activate", self.on_close)
menuitem_close_window.add_accelerator("activate", self.accel_group, gtk.gdk.keyval_from_name("w"), gtk.gdk.CONTROL_MASK | gtk.gdk.SHIFT_MASK, gtk.ACCEL_VISIBLE)
menu_file.append(menuitem_close_window)
pass
menuitem_quit = gtk.ImageMenuItem("gtk-quit")
menuitem_quit.show()
menuitem_quit.connect("activate", self.on_quit)
menuitem_quit.add_accelerator("activate", self.accel_group, gtk.gdk.keyval_from_name("q"), gtk.gdk.CONTROL_MASK, gtk.ACCEL_VISIBLE)
menu_file.append(menuitem_quit)
menuitem_file.set_submenu(menu_file)
menubar1.append(menuitem_file)
menuitem_edit = gtk.MenuItem(_("_Edit"))
menuitem_edit.show()
menu_edit = gtk.Menu()
menu_edit.append(gtk.TearoffMenuItem())
menuitem_undo = gtk.ImageMenuItem("gtk-undo")
menuitem_undo.show()
menuitem_undo.connect("activate", self.do_undo)
menu_edit.append(menuitem_undo)
menuitem_redo = gtk.ImageMenuItem("gtk-redo")
menuitem_redo.show()
menuitem_redo.connect("activate", self.do_redo)
menu_edit.append(menuitem_redo)
menuitem_separator2 = gtk.MenuItem()
menuitem_separator2.show()
menu_edit.append(menuitem_separator2)
menuitem_cut = gtk.ImageMenuItem("gtk-cut")
menuitem_cut.show()
menuitem_cut.connect("activate", self.do_cut)
menu_edit.append(menuitem_cut)
menuitem_copy = gtk.ImageMenuItem("gtk-copy")
menuitem_copy.show()
menuitem_copy.connect("activate", self.do_copy)
menu_edit.append(menuitem_copy)
menuitem_paste = gtk.ImageMenuItem("gtk-paste")
menuitem_paste.show()
menuitem_paste.connect("activate", self.do_paste)
menu_edit.append(menuitem_paste)
menuitem_paste_unformatted = gtk.ImageMenuItem(_("Pa_ste Unformatted"))
menuitem_paste_unformatted.show()
menuitem_paste_unformatted.connect("activate", self.do_paste_unformatted)
menuitem_paste_unformatted.add_accelerator("activate",
self.accel_group, gtk.gdk.keyval_from_name("v"), gtk.gdk.CONTROL_MASK | gtk.gdk.SHIFT_MASK, gtk.ACCEL_VISIBLE)
menu_edit.append(menuitem_paste_unformatted)
menuitem_delete = gtk.ImageMenuItem("gtk-delete")
menuitem_delete.show()
menuitem_delete.connect("activate", self.do_delete)
menu_edit.append(menuitem_delete)
menuitem_separator3 = gtk.MenuItem()
menuitem_separator3.show()
menu_edit.append(menuitem_separator3)
menuitem_select_all = gtk.ImageMenuItem("gtk-select-all")
menuitem_select_all.show()
menuitem_select_all.connect("activate", self.do_selectall)
menu_edit.append(menuitem_select_all)
menuitem_separator12 = gtk.MenuItem()
menuitem_separator12.show()
menu_edit.append(menuitem_separator12)
menuitem_find = gtk.ImageMenuItem("gtk-find")
menuitem_find.show()
menuitem_find.connect("activate", self.show_findbar)
menuitem_find.add_accelerator("activate", self.accel_group, gtk.gdk.keyval_from_name("f"), gtk.gdk.CONTROL_MASK, gtk.ACCEL_VISIBLE)
menu_edit.append(menuitem_find)
menuitem_find_and_replace = gtk.ImageMenuItem("gtk-find-and-replace")
menuitem_find_and_replace.show()
menuitem_find_and_replace.connect("activate", self.show_findbar)
menu_edit.append(menuitem_find_and_replace)
##
menu_edit.append(gtk.MenuItem())
menuitem = gtk.ImageMenuItem("gtk-preferences")
menuitem.show()
menuitem.connect("activate", lambda *i: (config.show_preference_dlg(), config.write()))
menu_edit.append(menuitem)
##
menuitem_edit.set_submenu(menu_edit)
menubar1.append(menuitem_edit)
menuitem_view = gtk.MenuItem(_("_View"))
menuitem_view.show()
menu_view = gtk.Menu()
menu_view.append(gtk.TearoffMenuItem())
## 缩放菜单
menuitem_zoom_in = gtk.ImageMenuItem(gtk.STOCK_ZOOM_IN)
menuitem_zoom_in.connect("activate", self.zoom_in)
# Ctrl++
menuitem_zoom_in.add_accelerator("activate", self.accel_group, gtk.gdk.keyval_from_name("equal"), gtk.gdk.CONTROL_MASK, gtk.ACCEL_VISIBLE)
menuitem_zoom_in.add_accelerator("activate", self.accel_group, gtk.gdk.keyval_from_name("plus"), gtk.gdk.CONTROL_MASK, gtk.ACCEL_VISIBLE)
menuitem_zoom_in.show()
menu_view.append(menuitem_zoom_in)
menuitem_zoom_out = gtk.ImageMenuItem(gtk.STOCK_ZOOM_OUT)
menuitem_zoom_out.connect("activate", self.zoom_out)
# Ctrl+-
menuitem_zoom_out.add_accelerator("activate", self.accel_group, gtk.gdk.keyval_from_name("minus"), gtk.gdk.CONTROL_MASK, gtk.ACCEL_VISIBLE)
menuitem_zoom_out.add_accelerator("activate", self.accel_group, gtk.gdk.keyval_from_name("underscore"), gtk.gdk.CONTROL_MASK, gtk.ACCEL_VISIBLE)
menuitem_zoom_out.show()
menu_view.append(menuitem_zoom_out)
menuitem_zoom_100 = gtk.ImageMenuItem(gtk.STOCK_ZOOM_100)
menuitem_zoom_100.connect("activate", self.zoom_100)
# Ctrl+0
menuitem_zoom_100.add_accelerator("activate", self.accel_group, gtk.gdk.keyval_from_name("0"), gtk.gdk.CONTROL_MASK, gtk.ACCEL_VISIBLE)
menuitem_zoom_100.show()
menu_view.append(menuitem_zoom_100)
##
menuitem_separator10 = gtk.MenuItem()
menuitem_separator10.show()
menu_view.append(menuitem_separator10)
menuitem_update_contents = gtk.ImageMenuItem(_("Update _Contents"))
menuitem_update_contents.show()
menuitem_update_contents.connect("activate", self.view_update_contents)
img = gtk.image_new_from_stock(gtk.STOCK_INDEX, gtk.ICON_SIZE_MENU)
menuitem_update_contents.set_image(img)
menu_view.append(menuitem_update_contents)
menuitem_toggle_numbered_title = gtk.ImageMenuItem(_("Toggle _Numbered Title"))
menuitem_toggle_numbered_title.show()
menuitem_toggle_numbered_title.connect("activate", self.view_toggle_autonumber)
img = gtk.image_new_from_stock(gtk.STOCK_SORT_DESCENDING, gtk.ICON_SIZE_MENU)
menuitem_toggle_numbered_title.set_image(img)
menu_view.append(menuitem_toggle_numbered_title)
menuitem_update_images = gtk.ImageMenuItem(_("Update _Images"))
menuitem_update_images.show()
menuitem_update_images.connect("activate", self.do_update_images)
img = gtk.image_new_from_icon_name('stock_insert_image', gtk.ICON_SIZE_MENU)
menuitem_update_images.set_image(img)
menu_view.append(menuitem_update_images)
menuitem_separator10 = gtk.MenuItem()
menuitem_separator10.show()
menu_view.append(menuitem_separator10)
menuitem_view_source = gtk.ImageMenuItem(_("So_urce/Visual"))
menuitem_view_source.show()
menuitem_view_source.connect("activate", self.view_sourceview)
menuitem_view_source.add_accelerator("activate", self.accel_group, gtk.gdk.keyval_from_name("u"), gtk.gdk.CONTROL_MASK, gtk.ACCEL_VISIBLE)
img = gtk.image_new_from_icon_name('stock_view-html-source', gtk.ICON_SIZE_MENU)
menuitem_view_source.set_image(img)
menu_view.append(menuitem_view_source)
menuitem_view.set_submenu(menu_view)
menubar1.append(menuitem_view)
menuitem_insert = gtk.MenuItem(_("_Insert"))
menuitem_insert.show()
menu_insert = gtk.Menu()
menu_insert.append(gtk.TearoffMenuItem())
menuitem_picture = gtk.ImageMenuItem(_("_Picture"))
menuitem_picture.show()
menuitem_picture.connect("activate", self.do_insertimage)
img = gtk.image_new_from_icon_name('stock_insert_image', gtk.ICON_SIZE_MENU)
menuitem_picture.set_image(img)
menu_insert.append(menuitem_picture)
menuitem_link = gtk.ImageMenuItem(_("_Link"))
menuitem_link.show()
menuitem_link.connect("activate", self.do_createlink)
menuitem_link.add_accelerator("activate", self.accel_group, gtk.gdk.keyval_from_name("k"), gtk.gdk.CONTROL_MASK, gtk.ACCEL_VISIBLE)
img = gtk.image_new_from_icon_name('stock_link', gtk.ICON_SIZE_MENU)
menuitem_link.set_image(img)
menu_insert.append(menuitem_link)
menuitem_horizontalrule = gtk.ImageMenuItem(_("Horizontal_Rule"))
menuitem_horizontalrule.show()
menuitem_horizontalrule.connect("activate", self.do_inserthorizontalrule)
img = gtk.image_new_from_icon_name('stock_insert-rule', gtk.ICON_SIZE_MENU)
menuitem_horizontalrule.set_image(img)
menu_insert.append(menuitem_horizontalrule)
menuitem_insert_table = gtk.ImageMenuItem(_("_Table"))
menuitem_insert_table.show()
menuitem_insert_table.connect("activate", self.do_insert_table)
img = gtk.image_new_from_icon_name('stock_insert-table', gtk.ICON_SIZE_MENU)
menuitem_insert_table.set_image(img)
menu_insert.append(menuitem_insert_table)
menuitem_insert_html = gtk.ImageMenuItem(_("_HTML"))
menuitem_insert_html.show()
menuitem_insert_html.connect("activate", self.do_insert_html)
img = gtk.image_new_from_icon_name('stock_view-html-source', gtk.ICON_SIZE_MENU)
menuitem_insert_html.set_image(img)
menu_insert.append(menuitem_insert_html)
menuitem_separator9 = gtk.MenuItem()
menuitem_separator9.show()
menu_insert.append(menuitem_separator9)
##
menuitem_latex_math_equation = gtk.ImageMenuItem(_("LaTeX _Equation"))
menuitem_latex_math_equation.show()
menuitem_latex_math_equation.connect("activate", self.do_insert_latex_math_equation)
menu_insert.append(menuitem_latex_math_equation)
menu_insert.append(gtk.MenuItem())
##
menuitem_insert_contents = gtk.ImageMenuItem(_("_Contents"))
menuitem_insert_contents.show()
menuitem_insert_contents.connect("activate", self.do_insert_contents)
img = gtk.image_new_from_stock(gtk.STOCK_INDEX, gtk.ICON_SIZE_MENU)
menuitem_insert_contents.set_image(img)
menu_insert.append(menuitem_insert_contents)
menuitem_insert.set_submenu(menu_insert)
menubar1.append(menuitem_insert)
menuitem_style = gtk.MenuItem(_("_Style"))
menuitem_style.show()
menu_style = gtk.Menu()
menu_style.append(gtk.TearoffMenuItem())
menuitem_normal = gtk.ImageMenuItem(_("_Normal"))
menuitem_normal.show()
menuitem_normal.connect("activate", self.do_formatblock_p)
menuitem_normal.add_accelerator("activate", self.accel_group, gtk.gdk.keyval_from_name("0"), gtk.gdk.CONTROL_MASK | gtk.gdk.MOD1_MASK, gtk.ACCEL_VISIBLE)
img = gtk.image_new_from_icon_name('stock_insert_section', gtk.ICON_SIZE_MENU)
menuitem_normal.set_image(img)
menu_style.append(menuitem_normal)
menuitem_separator4 = gtk.MenuItem()
menuitem_separator4.show()
menu_style.append(menuitem_separator4)
menuitem_heading_1 = gtk.ImageMenuItem(_("Heading _1"))
menuitem_heading_1.show()
menuitem_heading_1.connect("activate", self.do_formatblock_h1)
menuitem_heading_1.add_accelerator("activate", self.accel_group, gtk.gdk.keyval_from_name("1"), gtk.gdk.CONTROL_MASK | gtk.gdk.MOD1_MASK, gtk.ACCEL_VISIBLE)
img = gtk.image_new_from_icon_name('stock_insert-header', gtk.ICON_SIZE_MENU)
menuitem_heading_1.set_image(img)
menu_style.append(menuitem_heading_1)
menuitem_heading_2 = gtk.ImageMenuItem(_("Heading _2"))
menuitem_heading_2.show()
menuitem_heading_2.connect("activate", self.do_formatblock_h2)
menuitem_heading_2.add_accelerator("activate", self.accel_group, gtk.gdk.keyval_from_name("2"), gtk.gdk.CONTROL_MASK | gtk.gdk.MOD1_MASK, gtk.ACCEL_VISIBLE)
img = gtk.image_new_from_icon_name('stock_line-spacing-2', gtk.ICON_SIZE_MENU)
menuitem_heading_2.set_image(img)
menu_style.append(menuitem_heading_2)
menuitem_heading_3 = gtk.ImageMenuItem(_("Heading _3"))
menuitem_heading_3.show()
menuitem_heading_3.connect("activate", self.do_formatblock_h3)
menuitem_heading_3.add_accelerator("activate", self.accel_group, gtk.gdk.keyval_from_name("3"), gtk.gdk.CONTROL_MASK | gtk.gdk.MOD1_MASK, gtk.ACCEL_VISIBLE)
img = gtk.image_new_from_icon_name('stock_line-spacing-1', gtk.ICON_SIZE_MENU)
menuitem_heading_3.set_image(img)
menu_style.append(menuitem_heading_3)
menuitem_heading_4 = gtk.ImageMenuItem(_("Heading _4"))
menuitem_heading_4.show()
menuitem_heading_4.connect("activate", self.do_formatblock_h4)
menuitem_heading_4.add_accelerator("activate", self.accel_group, gtk.gdk.keyval_from_name("4"), gtk.gdk.CONTROL_MASK | gtk.gdk.MOD1_MASK, gtk.ACCEL_VISIBLE)
img = gtk.image_new_from_icon_name('stock_line-spacing-1.5', gtk.ICON_SIZE_MENU)
menuitem_heading_4.set_image(img)
menu_style.append(menuitem_heading_4)
menuitem_heading_5 = gtk.ImageMenuItem(_("Heading _5"))
menuitem_heading_5.show()
menuitem_heading_5.connect("activate", self.do_formatblock_h5)
menuitem_heading_5.add_accelerator("activate", self.accel_group, gtk.gdk.keyval_from_name("5"), gtk.gdk.CONTROL_MASK | gtk.gdk.MOD1_MASK, gtk.ACCEL_VISIBLE)
img = gtk.image_new_from_icon_name('stock_list_enum-off', gtk.ICON_SIZE_MENU)
menuitem_heading_5.set_image(img)
menu_style.append(menuitem_heading_5)
menuitem_heading_6 = gtk.ImageMenuItem(_("Heading _6"))
menuitem_heading_6.show()
menuitem_heading_6.connect("activate", self.do_formatblock_h6)
menuitem_heading_6.add_accelerator("activate", self.accel_group, gtk.gdk.keyval_from_name("6"), gtk.gdk.CONTROL_MASK | gtk.gdk.MOD1_MASK, gtk.ACCEL_VISIBLE)
img = gtk.image_new_from_icon_name('stock_list_enum-off', gtk.ICON_SIZE_MENU)
menuitem_heading_6.set_image(img)
menu_style.append(menuitem_heading_6)
menuitem_separator5 = gtk.MenuItem()
menuitem_separator5.show()
menu_style.append(menuitem_separator5)
menuitem_bulleted_list = gtk.ImageMenuItem(_("_Bulleted List"))
menuitem_bulleted_list.show()
menuitem_bulleted_list.connect("activate", self.do_insertunorderedlist)
img = gtk.image_new_from_icon_name('stock_list_bullet', gtk.ICON_SIZE_MENU)
menuitem_bulleted_list.set_image(img)
menu_style.append(menuitem_bulleted_list)
menuitem_numbered_list = gtk.ImageMenuItem(_("Numbered _List"))
menuitem_numbered_list.show()
menuitem_numbered_list.connect("activate", self.do_insertorderedlist)
img = gtk.image_new_from_icon_name('stock_list_enum', gtk.ICON_SIZE_MENU)
menuitem_numbered_list.set_image(img)
menu_style.append(menuitem_numbered_list)
menuitem_separator6 = gtk.MenuItem()
menuitem_separator6.show()
menu_style.append(menuitem_separator6)
div1 = gtk.ImageMenuItem(_("Di_v"))
div1.show()
div1.connect("activate", self.do_formatblock_div)
img = gtk.image_new_from_icon_name('stock_tools-hyphenation', gtk.ICON_SIZE_MENU)
div1.set_image(img)
menu_style.append(div1)
address1 = gtk.ImageMenuItem(_("A_ddress"))
address1.show()
address1.connect("activate", self.do_formatblock_address)
img = gtk.image_new_from_icon_name('stock_tools-hyphenation', gtk.ICON_SIZE_MENU)
address1.set_image(img)
menu_style.append(address1)
#menuitem_formatblock_code = gtk.ImageMenuItem(_("_Code"))
#menuitem_formatblock_code.show()
#menuitem_formatblock_code.connect("activate", self.do_formatblock_code)
#
#img = gtk.image_new_from_icon_name('stock_text-monospaced', gtk.ICON_SIZE_MENU)
#menuitem_formatblock_code.set_image(img)
#menu_style.append(menuitem_formatblock_code)
menuitem_formatblock_blockquote = gtk.ImageMenuItem(_("Block_quote"))
menuitem_formatblock_blockquote.show()
menuitem_formatblock_blockquote.connect("activate", self.do_formatblock_blockquote)
img = gtk.image_new_from_icon_name('stock_list-insert-unnumbered', gtk.ICON_SIZE_MENU)
menuitem_formatblock_blockquote.set_image(img)
menu_style.append(menuitem_formatblock_blockquote)
menuitem_formatblock_pre = gtk.ImageMenuItem(_("_Preformat"))
menuitem_formatblock_pre.show()
menuitem_formatblock_pre.connect("activate", self.do_formatblock_pre)
menuitem_formatblock_pre.add_accelerator("activate", self.accel_group, gtk.gdk.keyval_from_name("t"), gtk.gdk.CONTROL_MASK, gtk.ACCEL_VISIBLE)
img = gtk.image_new_from_icon_name('stock_text-quickedit', gtk.ICON_SIZE_MENU)
menuitem_formatblock_pre.set_image(img)
menu_style.append(menuitem_formatblock_pre)
menuitem_style.set_submenu(menu_style)
menubar1.append(menuitem_style)
menuitem_format = gtk.MenuItem(_("For_mat"))
menuitem_format.show()
menu_format = gtk.Menu()
menu_format.append(gtk.TearoffMenuItem())
menuitem_bold = gtk.ImageMenuItem("gtk-bold")
menuitem_bold.show()
menuitem_bold.connect("activate", self.on_bold)
menuitem_bold.add_accelerator("activate", self.accel_group, gtk.gdk.keyval_from_name("b"), gtk.gdk.CONTROL_MASK, gtk.ACCEL_VISIBLE)
menu_format.append(menuitem_bold)
menuitem_italic = gtk.ImageMenuItem("gtk-italic")
menuitem_italic.show()
menuitem_italic.connect("activate", self.do_italic)
menuitem_italic.add_accelerator("activate", self.accel_group, gtk.gdk.keyval_from_name("i"), gtk.gdk.CONTROL_MASK, gtk.ACCEL_VISIBLE)
menu_format.append(menuitem_italic)
menuitem_underline = gtk.ImageMenuItem("gtk-underline")
menuitem_underline.show()
menuitem_underline.connect("activate", self.do_underline)
menuitem_underline.add_accelerator("activate", self.accel_group, gtk.gdk.keyval_from_name("u"), gtk.gdk.CONTROL_MASK, gtk.ACCEL_VISIBLE)
menu_format.append(menuitem_underline)
menuitem_strikethrough = gtk.ImageMenuItem("gtk-strikethrough")
menuitem_strikethrough.show()
menuitem_strikethrough.connect("activate", self.do_strikethrough)
menu_format.append(menuitem_strikethrough)
self.separator7 = gtk.MenuItem()
self.separator7.show()
menu_format.append(self.separator7)
menuitem_font_fontname = gtk.ImageMenuItem("gtk-select-font")
menuitem_font_fontname.show()
#menuitem_font_fontname.connect("activate", self.do_font_fontname)
## 字体列表菜单 #########################################
self.fontname_menu = gtk.Menu()
self.fontname_menu.append(gtk.TearoffMenuItem())
fontnames = sorted(( familie.get_name() for familie in gtk.Label().get_pango_context().list_families() ))
## 调整字体列表顺序,将中文字体提至前列
for fontname in fontnames:
try:
fontname.decode('ascii')
pass
except:
fontnames.remove(fontname)
fontnames.insert(0, fontname)
pass
pass
for fontname in ['Serif', 'Sans', 'Sans-serif', 'Monospace', ''] + fontnames:
if fontname:
menu = gtk.MenuItem(fontname)
menu.connect("activate", self.do_font_fontname, fontname)
pass
else:
menu = gtk.MenuItem()
pass
menu.show()
self.fontname_menu.append(menu)
pass
self.fontname_menu.show()
menuitem_font_fontname.set_submenu(self.fontname_menu)
###########################################
menu_format.append(menuitem_font_fontname)
menuitem_font_size = gtk.ImageMenuItem(_("Font _Size"))
menuitem_font_size.show()
img = gtk.image_new_from_icon_name('stock_font-size', gtk.ICON_SIZE_MENU)
menuitem_font_size.set_image(img)
self.font_size1_menu = gtk.Menu()
self.font_size1_menu.append(gtk.TearoffMenuItem())
menuitem_fontsize_1 = gtk.MenuItem(_("_1"))
menuitem_fontsize_1.show()
menuitem_fontsize_1.connect("activate", self.do_fontsize_1)
self.font_size1_menu.append(menuitem_fontsize_1)
menuitem_fontsize_2 = gtk.MenuItem(_("_2"))
menuitem_fontsize_2.show()
menuitem_fontsize_2.connect("activate", self.do_fontsize_2)
self.font_size1_menu.append(menuitem_fontsize_2)
menuitem_fontsize_3 = gtk.MenuItem(_("_3"))
menuitem_fontsize_3.show()
menuitem_fontsize_3.connect("activate", self.do_fontsize_3)
self.font_size1_menu.append(menuitem_fontsize_3)
menuitem_fontsize_4 = gtk.MenuItem(_("_4"))
menuitem_fontsize_4.show()
menuitem_fontsize_4.connect("activate", self.do_fontsize_4)
self.font_size1_menu.append(menuitem_fontsize_4)
menuitem_fontsize_5 = gtk.MenuItem(_("_5"))
menuitem_fontsize_5.show()
menuitem_fontsize_5.connect("activate", self.do_fontsize_5)
self.font_size1_menu.append(menuitem_fontsize_5)
menuitem_fontsize_6 = gtk.MenuItem(_("_6"))
menuitem_fontsize_6.show()
menuitem_fontsize_6.connect("activate", self.do_fontsize_6)
self.font_size1_menu.append(menuitem_fontsize_6)
menuitem_fontsize_7 = gtk.MenuItem(_("_7"))
menuitem_fontsize_7.show()
menuitem_fontsize_7.connect("activate", self.do_fontsize_7)
self.font_size1_menu.append(menuitem_fontsize_7)
menuitem_font_size.set_submenu(self.font_size1_menu)
menu_format.append(menuitem_font_size)
menuitem_color = gtk.ImageMenuItem("gtk-select-color")
menuitem_color.show()
menuitem_color.connect("activate", self.on_color_select_forecolor)
menu_format.append(menuitem_color)
menuitem_bg_color = gtk.ImageMenuItem(_("_Highlight"))
menuitem_bg_color.show()
menuitem_bg_color.connect("activate", self.do_color_hilitecolor)
menuitem_bg_color.add_accelerator("activate", self.accel_group, gtk.gdk.keyval_from_name("h"), gtk.gdk.CONTROL_MASK, gtk.ACCEL_VISIBLE)
img = gtk.image_new_from_icon_name('stock_text_color_hilight', gtk.ICON_SIZE_MENU)
menuitem_bg_color.set_image(img)
menu_format.append(menuitem_bg_color)
menuitem_bg_color_select = gtk.ImageMenuItem(_("_HiliteColor"))
menuitem_bg_color_select.show()
menuitem_bg_color_select.connect("activate", self.on_color_select_hilitecolor)
img = gtk.image_new_from_stock(gtk.STOCK_SELECT_COLOR, gtk.ICON_SIZE_MENU)
menuitem_bg_color_select.set_image(img)
menu_format.append(menuitem_bg_color_select)
menuitem_clearformat = gtk.ImageMenuItem(_("_Clear format"))
img = gtk.image_new_from_icon_name("gtk-clear", gtk.ICON_SIZE_MENU)
menuitem_clearformat.set_image(img)
menuitem_clearformat.show()
menuitem_clearformat.connect("activate", self.do_removeformat)
menuitem_clearformat.add_accelerator("activate", self.accel_group, gtk.gdk.keyval_from_name("backslash"), gtk.gdk.CONTROL_MASK, gtk.ACCEL_VISIBLE)
menu_format.append(menuitem_clearformat)
self.separator8 = gtk.MenuItem()
self.separator8.show()
menu_format.append(self.separator8)
menuitem_justifyleft = gtk.ImageMenuItem("gtk-justify-left")
menuitem_justifyleft.show()
menuitem_justifyleft.connect("activate", self.do_justifyleft)
menuitem_justifyleft.add_accelerator("activate", self.accel_group, gtk.gdk.keyval_from_name("l"), gtk.gdk.CONTROL_MASK | gtk.gdk.SHIFT_MASK, gtk.ACCEL_VISIBLE)
menu_format.append(menuitem_justifyleft)
menuitem_justifycenter = gtk.ImageMenuItem("gtk-justify-center")
menuitem_justifycenter.show()
menuitem_justifycenter.connect("activate", self.do_justifycenter)
menuitem_justifycenter.add_accelerator("activate", self.accel_group, gtk.gdk.keyval_from_name("e"), gtk.gdk.CONTROL_MASK | gtk.gdk.SHIFT_MASK, gtk.ACCEL_VISIBLE)
menu_format.append(menuitem_justifycenter)
menuitem_justifyright = gtk.ImageMenuItem("gtk-justify-right")
menuitem_justifyright.show()
menuitem_justifyright.connect("activate", self.do_justifyright)
menuitem_justifyright.add_accelerator("activate", self.accel_group, gtk.gdk.keyval_from_name("r"), gtk.gdk.CONTROL_MASK | gtk.gdk.SHIFT_MASK, gtk.ACCEL_VISIBLE)
menu_format.append(menuitem_justifyright)
menuitem_justifyfull = gtk.ImageMenuItem("gtk-justify-fill")
menuitem_justifyfull.show()
menuitem_justifyfull.connect("activate", self.do_justifyfull)
menuitem_justifyfull.add_accelerator("activate", self.accel_group, gtk.gdk.keyval_from_name("j"), gtk.gdk.CONTROL_MASK | gtk.gdk.SHIFT_MASK, gtk.ACCEL_VISIBLE)
menu_format.append(menuitem_justifyfull)
self.separator11 = gtk.MenuItem()
self.separator11.show()
menu_format.append(self.separator11)
menuitem_increase_indent = gtk.ImageMenuItem("gtk-indent")
menuitem_increase_indent.show()
menuitem_increase_indent.connect("activate", self.do_indent)
menu_format.append(menuitem_increase_indent)
menuitem_decrease_indent = gtk.ImageMenuItem("gtk-unindent")
menuitem_decrease_indent.show()
menuitem_decrease_indent.connect("activate", self.do_outdent)
menu_format.append(menuitem_decrease_indent)
self.separator16 = gtk.MenuItem()
self.separator16.show()
menu_format.append(self.separator16)
menuitem_superscript = gtk.ImageMenuItem(_("Su_perscript"))
menuitem_superscript.show()
menuitem_superscript.connect("activate", self.do_superscript)
menuitem_superscript.add_accelerator("activate", self.accel_group, gtk.gdk.keyval_from_name("period"), gtk.gdk.CONTROL_MASK, gtk.ACCEL_VISIBLE)
img = gtk.image_new_from_icon_name('stock_superscript', gtk.ICON_SIZE_MENU)
menuitem_superscript.set_image(img)
menu_format.append(menuitem_superscript)
menuitem_subscript = gtk.ImageMenuItem(_("Subs_cript"))
menuitem_subscript.show()
menuitem_subscript.connect("activate", self.do_subscript)
menuitem_subscript.add_accelerator("activate", self.accel_group, gtk.gdk.keyval_from_name("comma"), gtk.gdk.CONTROL_MASK, gtk.ACCEL_VISIBLE)
img = gtk.image_new_from_icon_name('stock_subscript', gtk.ICON_SIZE_MENU)
menuitem_subscript.set_image(img)
menu_format.append(menuitem_subscript)
menuitem_format.set_submenu(menu_format)
menubar1.append(menuitem_format)
##
menuitem_tools = gtk.MenuItem(_("_Tools"))
menuitem_tools.show()
menu_tools = gtk.Menu()
menu_tools.append(gtk.TearoffMenuItem())
menuitem_word_count = gtk.ImageMenuItem(_("_Word Count"))
img = gtk.image_new_from_icon_name('gtk-index', gtk.ICON_SIZE_MENU)
menuitem_word_count.set_image(img)
menuitem_word_count.show()
menuitem_word_count.connect("activate", self.on_word_counts)
menuitem_word_count.add_accelerator("activate", self.accel_group, gtk.gdk.keyval_from_name("c"), gtk.gdk.CONTROL_MASK | gtk.gdk.SHIFT_MASK, gtk.ACCEL_VISIBLE)
menu_tools.append(menuitem_word_count)
menuitem_tools.set_submenu(menu_tools)
menubar1.append(menuitem_tools)
##
menuitem_help = gtk.MenuItem(_("_Help"))
menuitem_help.show()
menu_help = gtk.Menu()
menu_help.append(gtk.TearoffMenuItem())
menuitem_about = gtk.ImageMenuItem("gtk-about")
menuitem_about.show()
menuitem_about.connect("activate", self.on_about)
menu_help.append(menuitem_about)
menuitem_help.set_submenu(menu_help)
menubar1.append(menuitem_help)
menubar1.show_all()
self.vbox1.pack_start(menubar1, False, False, 0)
## 工具栏
self.toolbar1 = gtk.Toolbar()
self.toolbar1.show()
toolbutton_new = gtk.ToolButton()
toolbutton_new.set_tooltip_text(_("New"))
toolbutton_new.show()
toolbutton_new.set_stock_id(gtk.STOCK_NEW)
toolbutton_new.connect("clicked", self.on_new)
self.toolbar1.add(toolbutton_new)
toolbutton_open = gtk.MenuToolButton(gtk.STOCK_OPEN)
toolbutton_open.set_tooltip_text(_("Open"))
toolbutton_open.show()
#toolbutton_open.set_stock_id(gtk.STOCK_OPEN)
toolbutton_open.connect("clicked", self.on_open)
toolbutton_open.set_menu(menu_recent)
self.toolbar1.add(toolbutton_open)
toolbutton_save = gtk.ToolButton()
toolbutton_save.set_tooltip_text(_("Save"))
toolbutton_save.show()
toolbutton_save.set_stock_id(gtk.STOCK_SAVE)
toolbutton_save.connect("clicked", self.on_save)
self.toolbar1.add(toolbutton_save)
separatortoolitem1 = gtk.SeparatorToolItem()
separatortoolitem1.show()
self.toolbar1.add(separatortoolitem1)
toolbutton_undo = gtk.ToolButton()
toolbutton_undo.set_tooltip_text(_("Undo"))
toolbutton_undo.show()
toolbutton_undo.set_stock_id(gtk.STOCK_UNDO)
toolbutton_undo.connect("clicked", self.do_undo)
self.toolbar1.add(toolbutton_undo)
toolbutton_redo = gtk.ToolButton()
toolbutton_redo.set_tooltip_text(_("Redo"))
toolbutton_redo.show()
toolbutton_redo.set_stock_id(gtk.STOCK_REDO)
toolbutton_redo.connect("clicked", self.do_redo)
self.toolbar1.add(toolbutton_redo)
separatortoolitem3 = gtk.SeparatorToolItem()
separatortoolitem3.show()
self.toolbar1.add(separatortoolitem3)
toolbutton_cut = gtk.ToolButton()
toolbutton_cut.set_tooltip_text(_("Cut"))
toolbutton_cut.show()
toolbutton_cut.set_stock_id(gtk.STOCK_CUT)
toolbutton_cut.connect("clicked", self.do_cut)
self.toolbar1.add(toolbutton_cut)
toolbutton_copy = gtk.ToolButton()
toolbutton_copy.set_tooltip_text(_("Copy"))
toolbutton_copy.show()
toolbutton_copy.set_stock_id(gtk.STOCK_COPY)
toolbutton_copy.connect("clicked", self.do_copy)
self.toolbar1.add(toolbutton_copy)
toolbutton_paste = gtk.ToolButton()
toolbutton_paste.set_tooltip_text(_("Paste"))
toolbutton_paste.show()
toolbutton_paste.set_stock_id(gtk.STOCK_PASTE)
toolbutton_paste.connect("clicked", self.do_paste)
self.toolbar1.add(toolbutton_paste)
separatortoolitem2 = gtk.SeparatorToolItem()
separatortoolitem2.show()
self.toolbar1.add(separatortoolitem2)
## p, h1, h2 样式
label1 = gtk.Label("")
label1.set_markup("<b>P</b>")
button1 = gtk.ToolButton(label1, _("Paragraph"))
button1.set_tooltip_text(_("Paragraph"))
button1.connect("clicked", self.do_formatblock_p)
button1.show()
self.toolbar1.add( button1)
label1 = gtk.Label("")
label1.set_markup("<big><big><b>H1</b></big></big>")
button1 = gtk.ToolButton(label1, _("Heading 1"))
button1.set_tooltip_text(_("Heading 1"))
button1.connect("clicked", self.do_formatblock_h1)
button1.show()
self.toolbar1.add( button1)
label1 = gtk.Label("")
label1.set_markup("<big><b>H2</b></big>")
button1 = gtk.ToolButton(label1, _("Heading 2"))
button1.set_tooltip_text(_("Heading 2"))
button1.connect("clicked", self.do_formatblock_h2)
button1.show()
self.toolbar1.add( button1)
## h3 样式
label1 = gtk.Label("")
label1.set_markup("<b>H3</b>")
button1 = gtk.MenuToolButton(label1, _("Heading 3"))
button1.set_tooltip_text(_("Heading 3"))
button1.set_arrow_tooltip_markup(_("Style"))
button1.connect("clicked", self.do_formatblock_h3)
button1.show()
self.toolbar1.add( button1)
menu_style = gtk.Menu()
menuitem_heading_4 = gtk.ImageMenuItem(_("Heading _4"))
menuitem_heading_4.show()
menuitem_heading_4.connect("activate", self.do_formatblock_h4)
img = gtk.image_new_from_icon_name('stock_line-spacing-1.5', gtk.ICON_SIZE_MENU)
menuitem_heading_4.set_image(img)
menu_style.append(menuitem_heading_4)
menuitem_heading_5 = gtk.ImageMenuItem(_("Heading _5"))
menuitem_heading_5.show()
menuitem_heading_5.connect("activate", self.do_formatblock_h5)
img = gtk.image_new_from_icon_name('stock_list_enum-off', gtk.ICON_SIZE_MENU)
menuitem_heading_5.set_image(img)
menu_style.append(menuitem_heading_5)
menuitem_heading_6 = gtk.ImageMenuItem(_("Heading _6"))
menuitem_heading_6.show()
menuitem_heading_6.connect("activate", self.do_formatblock_h6)
img = gtk.image_new_from_icon_name('stock_list_enum-off', gtk.ICON_SIZE_MENU)
menuitem_heading_6.set_image(img)
menu_style.append(menuitem_heading_6)
menuitem_separator5 = gtk.MenuItem()
menuitem_separator5.show()
menu_style.append(menuitem_separator5)
menuitem_bulleted_list = gtk.ImageMenuItem(_("_Bulleted List"))
menuitem_bulleted_list.show()
menuitem_bulleted_list.connect("activate", self.do_insertunorderedlist)
img = gtk.image_new_from_icon_name('stock_list_bullet', gtk.ICON_SIZE_MENU)
menuitem_bulleted_list.set_image(img)
menu_style.append(menuitem_bulleted_list)
menuitem_numbered_list = gtk.ImageMenuItem(_("Numbered _List"))
menuitem_numbered_list.show()
menuitem_numbered_list.connect("activate", self.do_insertorderedlist)
img = gtk.image_new_from_icon_name('stock_list_enum', gtk.ICON_SIZE_MENU)
menuitem_numbered_list.set_image(img)
menu_style.append(menuitem_numbered_list)
menuitem_separator6 = gtk.MenuItem()
menuitem_separator6.show()
menu_style.append(menuitem_separator6)
div1 = gtk.ImageMenuItem(_("Di_v"))
div1.show()
div1.connect("activate", self.do_formatblock_div)
img = gtk.image_new_from_icon_name('stock_tools-hyphenation', gtk.ICON_SIZE_MENU)
div1.set_image(img)
menu_style.append(div1)
address1 = gtk.ImageMenuItem(_("A_ddress"))
address1.show()
address1.connect("activate", self.do_formatblock_address)
img = gtk.image_new_from_icon_name('stock_tools-hyphenation', gtk.ICON_SIZE_MENU)
address1.set_image(img)
menu_style.append(address1)
#menuitem_formatblock_code = gtk.ImageMenuItem(_("_Code"))
#menuitem_formatblock_code.show()
#menuitem_formatblock_code.connect("activate", self.do_formatblock_code)
#
#img = gtk.image_new_from_icon_name('stock_text-monospaced', gtk.ICON_SIZE_MENU)
#menuitem_formatblock_code.set_image(img)
#menu_style.append(menuitem_formatblock_code)
menuitem_formatblock_blockquote = gtk.ImageMenuItem(_("Block_quote"))
menuitem_formatblock_blockquote.show()
menuitem_formatblock_blockquote.connect("activate", self.do_formatblock_blockquote)
img = gtk.image_new_from_icon_name('stock_list-insert-unnumbered', gtk.ICON_SIZE_MENU)
menuitem_formatblock_blockquote.set_image(img)
menu_style.append(menuitem_formatblock_blockquote)
menuitem_formatblock_pre = gtk.ImageMenuItem(_("_Preformat"))
menuitem_formatblock_pre.show()
menuitem_formatblock_pre.connect("activate", self.do_formatblock_pre)
img = gtk.image_new_from_icon_name('stock_text-quickedit', gtk.ICON_SIZE_MENU)
menuitem_formatblock_pre.set_image(img)
menu_style.append(menuitem_formatblock_pre)
button1.set_menu(menu_style)
########################
## 粗体按钮菜单
menu_format = gtk.Menu()
menu_format.append(gtk.TearoffMenuItem())
menuitem_italic = gtk.ImageMenuItem("gtk-italic")
menuitem_italic.show()
menuitem_italic.connect("activate", self.do_italic)
menu_format.append(menuitem_italic)
menuitem_underline = gtk.ImageMenuItem("gtk-underline")
menuitem_underline.show()
menuitem_underline.connect("activate", self.do_underline)
menu_format.append(menuitem_underline)
menuitem_strikethrough = gtk.ImageMenuItem("gtk-strikethrough")
menuitem_strikethrough.show()
menuitem_strikethrough.connect("activate", self.do_strikethrough)
menu_format.append(menuitem_strikethrough)
separatortoolitem4 = gtk.SeparatorToolItem()
separatortoolitem4.show()
self.toolbar1.add(separatortoolitem4)
toolbutton_bold = gtk.MenuToolButton(gtk.STOCK_BOLD)
toolbutton_bold.set_label(_("Bold"))
toolbutton_bold.set_tooltip_text(_("Bold"))
toolbutton_bold.show()
toolbutton_bold.set_stock_id(gtk.STOCK_BOLD)
toolbutton_bold.connect("clicked", self.on_bold)
toolbutton_bold.set_menu(menu_format)
self.toolbar1.add(toolbutton_bold)
## 高亮颜色
toolbutton_hilitecolor = gtk.MenuToolButton("")
toolbutton_hilitecolor.set_icon_name("stock_text_color_hilight")
toolbutton_hilitecolor.set_label(_("Highlight"))
toolbutton_hilitecolor.set_tooltip_text(_("Highlight"))
toolbutton_hilitecolor.set_arrow_tooltip_markup(_("Select hilitecolor"))
toolbutton_hilitecolor.set_menu(gtk.Menu())
toolbutton_hilitecolor.show()
toolbutton_hilitecolor.connect("clicked", self.do_color_hilitecolor)
### 处理 ToolButton 箭头
on_color_select_hilitecolor = self.on_color_select_hilitecolor
ib, mb = toolbutton_hilitecolor.get_children()[0].get_children()
mb.connect("clicked", self.on_color_select_hilitecolor)
self.toolbar1.add(toolbutton_hilitecolor)
## 清除格式
button1 = gtk.ToolButton()
button1.set_icon_name("gtk-clear")
button1.set_label(_("Clear format"))
button1.set_tooltip_text(_("Clear format"))
button1.show()
button1.connect("clicked", self.do_removeformat)
self.toolbar1.add(button1)
### 字体菜单按钮
#toolbutton_font = gtk.MenuToolButton("gtk-select-font")
#toolbutton_font.set_label(_("Font"))
#toolbutton_font.set_tooltip_text(_("Font"))
#toolbutton_font.show()
#toolbutton_font.set_menu(self.fontname_menu)
### 处理 gtk.MenuToolButton 按钮
#m = toolbutton_font
#ib, mb = m.child.children()
#mb.remove(mb.child)
#ib.child.reparent(mb)
#m.child.remove(ib)
#self.toolbar1.add(toolbutton_font)
##
###############
self.toolbar = gtk.HandleBox()
self.toolbar.add(self.toolbar1)
self.toolbar.show_all()
self.vbox1.pack_start(self.toolbar, False, False, 0)
## 编辑区
#self.editport = gtk.Viewport()
#self.editport.show()
#self.editport.set_shadow_type(gtk.SHADOW_NONE)
#
#self.vbox1.pack_start(self.editport)
##
self.notebox = gtk.Notebook()
self.notebox.set_tab_pos(2) # 0, 1, 2, 3 -> left, top, right, bottom
self.notebox.set_border_width(0)
#self.notebox.popup_enable()
self.notebox.set_property('homogeneous', 0)
self.notebox.unset_flags(gtk.CAN_FOCUS)
self.notebox.set_scrollable(True)
self.notebox.connect("switch-page", self.on_mdi_switch_page)
self.notebox.connect("button-press-event", self.on_mdi_menu) # 用 "button-release-event" 会不能中止事件向上传递
self.notebox.show()
editbox = self.new_edit(self.editfile)
editbox.show()
self.notebox_insert_page(editbox)
self.notebox.set_tab_reorderable(editbox, True)
self.notebox.show_all()
self.vbox1.pack_start(self.notebox)
## 搜索栏
self.findbar = gtk.HandleBox()
self.findbar.set_shadow_type(gtk.SHADOW_OUT)
self.findbox = gtk.HBox(False, 0)
self.findbox.show()
button_hidefindbar = gtk.Button()
button_hidefindbar.set_tooltip_text(_("Close Findbar"))
button_hidefindbar.show()
button_hidefindbar.set_relief(gtk.RELIEF_NONE)
button_hidefindbar.connect("clicked", self.hide_findbar)
image113 = gtk.Image()
image113.set_from_stock(gtk.STOCK_CLOSE, 1)
image113.show()
button_hidefindbar.add(image113)
self.findbox.pack_start(button_hidefindbar, False, False, 0)
self.entry_searchtext = gtk.Entry()
self.entry_searchtext.show()
self.entry_searchtext.connect("changed", self.do_highlight_text_matches)
#self.entry_searchtext.set_property("primary-icon-stock", "gtk-go-back")
#self.entry_searchtext.set_property("primary-icon-tooltip-text", _("Find Previous"))
#self.entry_searchtext.set_property("secondary-icon-stock", "gtk-find")
#self.entry_searchtext.set_property("secondary-icon-tooltip-text", _("Find Next"))
self.entry_searchtext.set_property("primary-icon-stock", "gtk-find")
self.entry_searchtext.set_property("primary-icon-tooltip-text", _("Find Next"))
self.entry_searchtext.connect("icon-release", self.do_find_text)
self.entry_searchtext.set_tooltip_text(_("Search text"))
#self.entry_searchtext.set_flags(gtk.CAN_DEFAULT)
#self.entry_searchtext.grab_focus()
self.findbox.pack_start(self.entry_searchtext)
button1 = gtk.Button()
button1.set_tooltip_text(_("Find Previous"))
button1.show()
button1.set_relief(gtk.RELIEF_NONE)
button1.connect("clicked", self.do_find_text_backward)
image1 = gtk.Image()
image1.set_from_stock(gtk.STOCK_GO_BACK, 4)
image1.show()
button1.add(image1)
self.findbox.pack_start(button1, False, False, 0)
button_search_text = gtk.Button(_("Find"))
img = gtk.Image()
img.set_from_stock("gtk-find", 4)
img.show()
button_search_text.set_image(img)
button_search_text.set_tooltip_text(_("Find Next"))
button_search_text.show()
button_search_text.set_relief(gtk.RELIEF_NONE)
button_search_text.connect("clicked", self.do_find_text)
button_search_text.add_accelerator("clicked", self.accel_group, gtk.gdk.keyval_from_name("F3"), 0, gtk.ACCEL_VISIBLE)
self.findbox.pack_start(button_search_text, False, False, 0)
self.findbox.pack_start(gtk.VSeparator(), False, False, 3)
self.entry_replace_text = gtk.Entry()
self.entry_replace_text.show()
self.entry_replace_text.set_tooltip_text(_("Replace text"))
self.entry_replace_text.set_property("primary-icon-stock", "gtk-find-and-replace")
self.entry_replace_text.set_property("primary-icon-tooltip-text", _("Replace"))
self.findbox.pack_start(self.entry_replace_text)
button_replace_text = gtk.Button()
button_replace_text.set_tooltip_text(_("Replace"))
button_replace_text.show()
button_replace_text.set_relief(gtk.RELIEF_NONE)
button_replace_text.connect("clicked", self.do_replace_text)
alignment1 = gtk.Alignment(0.5, 0.5, 0, 0)
alignment1.show()
hbox2 = gtk.HBox(False, 0)
hbox2.show()
hbox2.set_spacing(2)
image136 = gtk.Image()
image136.set_from_stock(gtk.STOCK_FIND_AND_REPLACE, 4)
image136.show()
hbox2.pack_start(image136, False, False, 0)
label1 = gtk.Label(_("Replace"))
label1.show()
hbox2.pack_start(label1, False, False, 0)
alignment1.add(hbox2)
button_replace_text.add(alignment1)
self.findbox.pack_start(button_replace_text, False, False, 0)
#self.findbox.pack_start(gtk.VSeparator(), False, False, 0)
button2 = gtk.Button()
button2.set_tooltip_text(_("Replace All"))
button2.set_label(_("ReplaceAll"))
button2.show()
button2.set_relief(gtk.RELIEF_NONE)
img = gtk.Image()
img.set_from_stock("gtk-convert", 4)
img.show()
button2.set_image(img)
button2.connect("clicked", self.do_replace_text_all)
self.findbox.pack_start(button2, False, False, 0)
self.findbar.add(self.findbox)
self.vbox1.pack_start(self.findbar, False, False, 0)
#self.edit.contextmenu.append(menuitem_style)
#self.edit.connect("popup-menu", self._populate_popup)
if create:
self.window.add(self.vbox1)
pass
pass
def mdi_get_tab_menu(self, editbox=None, windowslist=0):
    """Build and return the context menu for a notebook tab.

    The menu offers New, Close (targeting *editbox*), and one entry per
    open tab to jump to it.  The *windowslist* branch is currently a
    no-op placeholder.
    """
    menu = gtk.Menu()
    menuitem_new = gtk.ImageMenuItem("gtk-new")
    menuitem_new.show()
    menuitem_new.connect("activate", self.on_new)
    menu.append(menuitem_new)
    menuitem_close = gtk.ImageMenuItem("gtk-close")
    menuitem_close.show()
    # close_tab receives editbox as extra user data so the right tab closes.
    menuitem_close.connect("activate", self.close_tab, editbox)
    menu.append(menuitem_close)
    menu.append(gtk.MenuItem())  # bare MenuItem acts as a separator
    notebox = self.notebox
    # One jump-to entry per open tab, labelled with the document title.
    for box in notebox.get_children():
        menuitem = gtk.ImageMenuItem(box.edit.title)
        menuitem.set_image(gtk.image_new_from_stock("gtk-dnd", gtk.ICON_SIZE_MENU))
        menuitem.connect("activate", self.notebox_set_current, box)
        menuitem.show()
        menu.append(menuitem)
        pass
    if windowslist and config.single_instance_mode:
        # placeholder: cross-window tab list not implemented
        pass
    menu.show_all()
    return menu
def on_accel_connect_group(self, accel_group, acceleratable, keyval, modifier):
    """Accelerator handler: Alt-1..Alt-9 switches to the matching tab.

    GDK keyvals for ASCII digits equal their character codes, so the
    page index is ``keyval - ord('1')`` (the former magic number 49).
    """
    num = keyval - ord('1')
    self.notebox.set_current_page(num)
    return
def on_mdi_menu(self, widget, event, editbox=None, *args):
    """Mouse handler for the notebook and tab labels.

    Right-click pops up the tab menu; middle-click or double-click
    closes the clicked tab (or opens a new one when not over a tab).
    Returns True to stop propagation when the event was handled.
    """
    if event.button == 3:
        menu = self.mdi_get_tab_menu(editbox)
        menu.popup(None, None, None, event.button, event.time)
        return True
    elif (
        ( event.type.value_name == "GDK_BUTTON_PRESS" and event.button == 2 ) or
        ( event.type.value_name == "GDK_2BUTTON_PRESS" and event.button == 1 )
    ):
        # middle/double click on a tab closes it; on empty space opens a new doc
        if editbox:
            self.close_tab(editbox)
            pass
        else:
            self.on_new()
            pass
        return True
    return False
def on_mdi_switch_page(self, notebook, page, page_num, *user_param):
    """Tab-switch handler: repoint window-level state at the new page.

    Syncs self.editbox/self.edit/self.linkview, shows the tab bar only
    when more than one document is open, updates the window title and
    re-applies the search highlight.
    """
    self.notebox.unset_flags(gtk.CAN_FOCUS)
    ## show/hide tabbar
    if self.notebox.get_n_pages() > 1:
        self.notebox.set_show_tabs(True)
        pass
    else:
        self.notebox.set_show_tabs(False)
        pass
    ## rebind the per-tab widgets
    editbox = self.notebox.get_nth_page(page_num)
    self.editbox = editbox
    self.edit = editbox.edit
    self.linkview = editbox.linkview
    self.window.set_title(self.edit.title + ' - ' + Title)
    # Best-effort: during window construction the find widgets may not
    # exist yet, hence the broad except.
    try:
        self.do_highlight_text_matches()
    except:
        pass
    pass
def on_over_link(self, edit, alt, href):
    """'hovering-over-link' handler: show the link target as tooltip.

    Links pointing into the current document are shortened to their
    '#anchor' part.  (Removed the unused local
    ``url = urllib2.unquote(uri)``: its value was never read, and the
    call could raise when the frame has no URI yet.)
    """
    href = href or ""
    uri = edit.get_main_frame().get_uri()
    if "#" in href and uri.split('#', 1)[0] == href.split('#', 1)[0]:
        href = "#" + href.split('#', 1)[1]
    self.window.set_tooltip_text(href)
    pass
def notebox_set_current(self, widget, editbox=None):
    """Bring *editbox* to the front (falls back to *widget* so the
    method also works when called directly, not as a signal handler)."""
    target = editbox or widget
    self.notebox.set_current_page(self.notebox.page_num(target))
    self.window.present()
    return
def notebox_set_label_text(self, editbox, text):
    """Set *editbox*'s tab and menu label to *text*, routing the tab
    label's mouse events to the tab context menu."""
    self.notebox.set_menu_label_text(editbox, text)
    label = gtk.Label(text)
    label.show()
    wrapper = gtk.EventBox()
    wrapper.set_visible_window(0)
    wrapper.connect("button-press-event", self.on_mdi_menu, editbox)
    wrapper.add(label)
    self.notebox.set_tab_label(editbox, wrapper)
def notebox_insert_page(self, editbox):
    """Insert *editbox* as a reorderable tab just after the current
    page and make it the active tab."""
    position = self.notebox.get_current_page() + 1
    page = self.notebox.insert_page(editbox, None, position)
    self.notebox_set_label_text(editbox, editbox.edit.title)
    self.notebox.set_tab_reorderable(editbox, True)
    self.notebox.set_current_page(page)
    return
def new_edit(self, editfile):
    """Build one editor tab for *editfile* and return its container.

    The returned VBox carries three attributes used throughout the
    window code: .edit (the WebKit editor), .linkview (the navigation
    pane's link list) and .navigation_pane (the pane itself).  An empty
    *editfile* creates an untitled document.
    """
    global new_num
    editbox = gtk.VBox()
    editbox.show()
    separator = gtk.HSeparator()
    separator.show()
    editbox.pack_start(separator, False, False)
    hpaned = gtk.HPaned()
    hpaned.set_border_width(0)
    hpaned.set_position(170)
    hpaned.show()
    editbox.pack_start(hpaned, True, True)
    ## Navigation pane (left side of the paned)
    vbox1 = gtk.VBox()
    label1 = gtk.Label(_("Navigation Pane"))
    label1.set_alignment(0, 0)
    vbox1.pack_start(label1, False, False)
    scrolledwindow1 = gtk.ScrolledWindow()
    scrolledwindow1.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
    scrolledwindow1.show()
    scrolledwindow1.set_shadow_type(gtk.SHADOW_IN)
    import webkitlinkview
    linkview = webkitlinkview.LinkTextView()
    linkview.connect('url-clicked', self.on_title_clicked)
    linkview.connect('populate-popup', self._linkview_populate_popup)
    linkview.show()
    scrolledwindow1.add(linkview)
    editbox.linkview = linkview
    vbox1.pack_start(scrolledwindow1)
    vbox1.show_all()
    hpaned.pack1(vbox1, False, True)
    editbox.navigation_pane = vbox1
    ## Edit area (right side of the paned)
    import webkitedit
    scrolledwindow2 = gtk.ScrolledWindow()
    scrolledwindow2.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
    scrolledwindow2.show()
    scrolledwindow2.set_shadow_type(gtk.SHADOW_IN)
    edit = webkitedit.WebKitEdit(editfile)
    edit.show()
    edit.connect("load-finished", self.on_load_finished)
    edit.connect("hovering-over-link", self.on_over_link)
    edit.set_flags(gtk.CAN_FOCUS)
    edit.set_flags(gtk.CAN_DEFAULT)
    self.window.present()
    scrolledwindow2.add(edit)
    editbox.edit = edit
    hpaned.pack2(scrolledwindow2, True, True)
    if editfile:
        edit.lastDir = os.path.dirname(editfile)
        edit.title = os.path.basename(editfile)
        self.add_recent(editfile)
        pass
    else:
        # Untitled documents get a running number in multi-document modes.
        if config.mdi_mode or config.single_instance_mode:
            edit.title = _("[New Document] %s") % new_num
            new_num += 1
            pass
        else:
            edit.title = _("[New Document]")
            pass
    # Swallow button events so clicks inside the tab don't bubble up
    # to the notebook's tab-menu handler.
    editbox.connect("button-press-event", lambda *i: True)
    gobject.idle_add(proc_webkit_color, edit, linkview)
    return editbox
def _populate_popup(self, view, menu):
    """Placeholder hook for the editor's context menu (currently unused)."""
    pass
def zoom(self, level):
    """Set the editor's zoom factor to *level*."""
    self.edit.set_zoom_level(level)
def zoom_100(self, *args):
    """Reset the editor zoom to 100%."""
    self.edit.set_zoom_level(1.0)
def zoom_in(self, *args):
    """Zoom the editor in one step."""
    self.edit.zoom_in()
def zoom_out(self, *args):
    """Zoom the editor out one step."""
    self.edit.zoom_out()
def _linkview_populate_popup(self, view, menu):
    """Rebuild the navigation pane's context menu.

    The stock WebKit items are stripped and replaced with: jump-to /
    select-section entries (when the popup was opened over a link),
    update-contents and toggle-numbering entries, and zoom controls for
    the pane itself.
    """
    # Detect a link under the pointer: WebKit adds a 'gtk-open' item in
    # that case, and the frame title carries the href.
    href = ""
    if menu_find_with_stock(menu, 'gtk-open') > -1:
        href = view.get_main_frame().get_title()
        pass
    ## Strip the stock menu items.
    for i in menu.get_children():
        menu.remove(i)
        pass
    ## Jump-to / select-section items (only when over a link).
    if href:
        menuitem_jump_to = gtk.ImageMenuItem("gtk-jump-to")
        menuitem_jump_to.show()
        menuitem_jump_to.connect("activate", self.edit.go_anchor, href)
        menu.append(menuitem_jump_to)
        menuitem_select = gtk.ImageMenuItem(_("_Select this"))
        menuitem_select.set_image(gtk.image_new_from_stock(gtk.STOCK_SELECT_ALL, gtk.ICON_SIZE_MENU))
        menuitem_select.show()
        menuitem_select.set_tooltip_markup(_("您也可以直接<b>双击</b>以选择该章节文字"))
        menuitem_select.connect("activate", self.edit.select_section, href)
        menu.append(menuitem_select)
        menu.append(gtk.MenuItem())  # separator
        pass
    ## Update-contents / numbering toggles.
    menuitem_update_contents = gtk.ImageMenuItem(_("Update _Contents"))
    menuitem_update_contents.show()
    menuitem_update_contents.connect("activate", self.view_update_contents)
    img = gtk.image_new_from_stock(gtk.STOCK_INDEX, gtk.ICON_SIZE_MENU)
    menuitem_update_contents.set_image(img)
    menu.append(menuitem_update_contents)
    menuitem_toggle_numbered_title = gtk.ImageMenuItem(_("Toggle _Numbered Title"))
    menuitem_toggle_numbered_title.show()
    menuitem_toggle_numbered_title.connect("activate", self.view_toggle_autonumber)
    img = gtk.image_new_from_stock(gtk.STOCK_SORT_DESCENDING, gtk.ICON_SIZE_MENU)
    menuitem_toggle_numbered_title.set_image(img)
    menu.append(menuitem_toggle_numbered_title)
    ## Zoom items for the navigation pane itself.
    linkview = self.linkview
    menuitem_separator10 = gtk.MenuItem()
    menuitem_separator10.show()
    menu.append(menuitem_separator10)
    menuitem_zoom_in = gtk.ImageMenuItem(gtk.STOCK_ZOOM_IN)
    menuitem_zoom_in.connect("activate", lambda *i: linkview.zoom_in())
    menuitem_zoom_in.show()
    menu.append(menuitem_zoom_in)
    menuitem_zoom_out = gtk.ImageMenuItem(gtk.STOCK_ZOOM_OUT)
    menuitem_zoom_out.connect("activate", lambda *i: linkview.zoom_out())
    menuitem_zoom_out.show()
    menu.append(menuitem_zoom_out)
    menuitem_zoom_100 = gtk.ImageMenuItem(gtk.STOCK_ZOOM_100)
    menuitem_zoom_100.connect("activate", lambda *i: linkview.set_zoom_level(1.0))
    menuitem_zoom_100.show()
    menu.append(menuitem_zoom_100)
    menu.show_all()
    pass
def on_title_clicked(self, widget, href, type):
    """Navigation-pane 'url-clicked' handler.

    A '+'-prefixed href selects the section's text; any other href
    jumps to its anchor.  NOTE(review): hrefs without a '#' would raise
    IndexError here -- presumably the linkview only emits '#'-anchored
    urls; confirm against webkitlinkview.
    """
    if href.startswith('+'):
        self.edit.select_section(href.split('#', 1)[1])
        return True
    href = href.split('#', 1)[1]
    self.edit.go_anchor(href)
    pass
def on_load_finished(self, edit, *args):
    """'load-finished' handler: rebuild the contents pane; a freshly
    blank document starts out in the saved state."""
    self.view_update_contents()
    if edit._html == "":
        edit.set_saved()
def close_tab(self, widget=None, editbox=None, *args):
    """Close one tab (the clicked one, or the current one).

    Prompts to save unsaved changes; returns True when the close was
    vetoed (save cancelled/failed) or when other tabs remain.  Closing
    the last tab destroys the window, and the last window quits GTK.
    """
    notebox = self.notebox
    # When invoked from a signal the tab's box arrives as *widget*.
    if widget and 'edit' in widget.__dict__:
        editbox = widget
        pass
    if not editbox:
        n = notebox.get_current_page()
        editbox = notebox.get_nth_page(n)
        pass
    edit = editbox.edit
    linkview = editbox.linkview
    self.window.show()
    if not edit.is_saved():
        ## r: 1, -1, 0 => yes, no, cancel
        r = gtkdialogs.savechanges(_("%s Save Changes?") % edit.title)
        if r == 1:
            filename = self.on_save()
            if not filename:
                # save dialog cancelled / write failed: keep the tab
                return True
            pass
        elif r == 0:
            return True
        pass
    # Tear down the tab's widgets.
    notebox.remove(editbox)
    edit.destroy()
    linkview.destroy()
    editbox.destroy()
    # Other tabs remain: keep the window open.
    if self.notebox.get_n_pages():
        return True
    Windows.remove(self)
    gtk.gdk.threads_leave()
    self.window.destroy()
    if not Windows:
        gtk.main_quit()
    return
def on_close(self, *args):
    """Close this window: close every tab, then destroy the window.

    Returns True (veto) if any tab refuses to close (unsaved changes
    and the user cancelled).  The last window also quits GTK.
    """
    #@TODO: 退出时未保存提示
    # close_tab() always acts on the current page, so iterate a fixed
    # count rather than the (shrinking) page list.
    for i in range(self.notebox.get_n_pages()):
        self.close_tab()
        pass
    if self.notebox.get_n_pages():
        return True
    # Best-effort removal: the window may already have been removed by
    # the last close_tab().
    try:
        Windows.remove(self)
        pass
    except:
        pass
    gtk.gdk.threads_leave()
    self.window.destroy()
    if not Windows:
        gtk.main_quit()
    pass
def on_quit(self, *args):
    """Close every window (newest first) and quit the GTK main loop.

    Iterates a snapshot of Windows: on_close() removes each window from
    the module-level list, and mutating a list while a reversed()
    iterator walks it can skip entries.
    """
    for window in reversed(list(Windows)):
        window.on_close()
        pass
    gtk.main_quit()
    pass
def on_new(self, *args):
    """Open a new, empty document."""
    return self.open("")
def on_new_window(self, *args):
    """Open a new top-level window.

    In single-instance mode this creates an in-process MainWindow;
    otherwise a fresh gwrite process is spawned.
    """
    if config.single_instance_mode:
        return MainWindow()
    return os.spawnvp(os.P_NOWAIT, sys.argv[0], ['gwrite'])
def add_recent(self, filename):
    """Register *filename* with the GTK recent-files manager."""
    metadata = {'mime_type': 'text/html', 'app_name': 'gwrite',
                'app_exec': 'gwrite', 'group': 'gwrite'}
    self.recent.add_full('file://' + filename, metadata)
def open(self, filename=""):
    """Open *filename* (or a new document when empty).

    MDI mode: focus an already-open tab for the same file, else add a
    new tab.  Non-MDI: reuse this window when it holds an untouched
    empty document; otherwise open a new in-process window
    (single-instance mode) or spawn a new gwrite process.
    """
    self.window.present()
    # mdi mode
    if config.mdi_mode:
        if filename:
            # Already open? Just switch to that tab.
            for editbox in self.notebox.get_children():
                if editbox.edit.editfile == filename:
                    self.notebox.set_current_page(self.notebox.page_num(editbox))
                    return
                pass
            pass
        editbox = self.new_edit(filename)
        self.notebox_insert_page(editbox)
        return
    # Current document is empty and unmodified: load into this window.
    if filename and self.edit.editfile == '' and self.edit.is_saved():
        self.window.set_title(os.path.basename(filename) + ' - ' + Title)
        self.edit.lastDir = os.path.dirname(filename)
        self.edit.editfile = filename
        self.edit._html = ""
        if filename and os.access(filename, os.R_OK):
            self.edit.open(filename)
            self.add_recent(filename)
            pass
        pass
    elif config.single_instance_mode:
        MainWindow(editfile = filename)
        pass
    else:
        # Separate-process mode: hand the file to a new gwrite instance.
        if filename:
            os.spawnvp(os.P_NOWAIT, sys.argv[0], ['gwrite', filename])
            pass
        else:
            os.spawnvp(os.P_NOWAIT, sys.argv[0], ['gwrite'])
            pass
        pass
    pass
def on_select_recent(self, menu):
    """Open the file chosen from the recent-files menu."""
    chosen = menu.get_current_item().get_uri_display()
    self.open(chosen)
def on_open(self, *args):
    """Show the Open dialog and load the selected file if readable."""
    filename = gtkdialogs.open(title=_('Open'),
                               name_mimes=[
                                   [_("Html Document"), "text/html"],
                                   [_("MS Doc Document"), "application/msword"],
                               ])
    if filename and os.access(filename, os.R_OK):
        self.open(filename)
    gtk.gdk.threads_leave()
def on_save(self, *args):
    """Save the current document, prompting for a filename if needed.

    Returns the filename on success, False on a write error, or the
    falsy dialog result when the user cancels the save dialog.
    """
    html = self.edit.get_html()
    if self.edit.editfile:
        filename = self.edit.editfile
    else:
        # Suggest the document <title> as the default file name.
        current_name = get_doctitle(html)
        filename = gtkdialogs.save(title=_('Save'),
                                   name_mimes=[[_("Html Document"), "text/html"]],
                                   current_name=current_name,)
        if filename and '.' not in os.path.basename(filename):
            filename = filename + '.html'
    if filename:
        try:
            # with-block closes the handle even on a partial write; the
            # old file(...).write(...) leaked the file object.
            with open(filename, 'w') as f:
                f.write(html)
        except EnvironmentError:
            # Narrowed from a bare except: only I/O errors mean "can't write".
            gtkdialogs.warning(_("Unable to write to file."))
            return False
        self.edit.lastDir = os.path.dirname(filename)
        if not self.edit.editfile:
            self.add_recent(filename)  # first save: add to recent files
        self.editfile = filename
        self.edit.set_saved()
        self.window.set_title(os.path.basename(filename) + ' - ' + Title)
        ## update the tab label
        self.edit.editfile = filename
        self.edit.title = os.path.basename(filename)
        self.notebox_set_label_text(self.editbox, self.edit.title)
        pass
    gtk.gdk.threads_leave()
    return filename
def on_save_as(self, *args):
    """Save the current document under a user-chosen name.

    Returns False on a write error; otherwise None.  Note: unlike
    on_save(), this does not rebind the tab to the new file.
    """
    html = self.edit.get_html()
    # Suggest the document <title> as the default file name.
    current_name = get_doctitle(html)
    filename = gtkdialogs.save(title=_('Save As'),
                               name_mimes=[[_("Html Document"), "text/html"]],
                               current_name=current_name, folder=self.edit.lastDir,)
    if filename and '.' not in os.path.basename(filename):
        filename = filename + '.html'
    if filename:
        try:
            # with-block closes the handle even on failure; the old
            # file(...).write(...) leaked the file object.
            with open(filename, 'w') as f:
                f.write(html)
        except EnvironmentError:
            # Narrowed from a bare except: only I/O errors mean "can't write".
            gtkdialogs.warning(_("Unable to write to file."))
            return False
        self.add_recent(filename)  # add to recent files
        self.edit.lastDir = os.path.dirname(filename)
        pass
    gtk.gdk.threads_leave()
    pass
def _text_stats(self, text):
    """Return (words, chars_with_spaces, chars_no_spaces, paragraphs,
    lines, words_en, words_cn) for *text*.

    Words = CJK characters (one each) + western \\w+ runs; paragraphs
    are the non-empty lines.
    """
    words_cn = len(re.findall(u'[\u4e00-\uffff]', text))
    words_en = len(re.findall(u'\\w+', text))
    all_lines = text.splitlines()
    return (words_cn + words_en,
            len(text),
            len(''.join(text.split())),
            len([i for i in all_lines if i]),
            len(all_lines),
            words_en,
            words_cn)

def on_word_counts(self, *args):
    """Show the Word Counts dialog for the document and, when text is
    selected, a second column for the selection.

    The counting logic was duplicated for document and selection; it
    now lives once in _text_stats().
    """
    document = self.edit.get_text().decode('utf8')
    selection = self.edit.get_selection()
    doc_stats = self._text_stats(document)
    sel_stats = self._text_stats(selection)
    labels = (
        _("Words: "),
        _("Characters (with spaces): "),
        _("Characters (no spaces): "),
        _("Paragraphs: "),
        _("Lines: "),
        _("English words: "),
        _("Chinese characters: "),
    )
    # Rows: label, document count, selection count (falsy when there is
    # no selection, matching the original `selection and s_*` cells).
    info = tuple(
        [("", _("Document"), selection and _("Selection"))] +
        [(labels[i], doc_stats[i], selection and sel_stats[i])
         for i in range(len(labels))]
    )
    gtkdialogs.infotablebox(_("Word Counts"), "<b>%s</b>" % self.edit.title, info)
    return
def on_print(self, *args):
    """Open the print dialog for the current document."""
    self.edit.do_print()
def do_undo(self, *args):
    """Undo the last edit in the active document."""
    self.window.present()
    self.edit.do_undo()
def do_redo(self, *args):
    """Redo the last undone edit."""
    self.window.present()
    self.edit.do_redo()
def do_cut(self, *args):
    """Cut the selection to the clipboard."""
    self.window.present()
    self.edit.do_cut()
def do_copy(self, *args):
    """Copy the selection to the clipboard."""
    self.window.present()
    self.edit.do_copy()
def do_paste(self, *args):
    """Paste the clipboard at the caret."""
    self.window.present()
    self.edit.do_paste()
def do_paste_unformatted(self, *args):
    """Paste the clipboard as plain text."""
    self.edit.do_paste_unformatted()
    return
def do_delete(self, *args):
    """Delete the current selection."""
    self.window.present()
    self.edit.do_delete()
def do_selectall(self, *args):
    """Select the entire document."""
    self.window.present()
    self.edit.do_selectall()
def show_findbar(self, *args):
    """Show the find/replace bar, focus the search entry and run the
    current search immediately."""
    self.findbar.show_all()
    self.entry_searchtext.grab_focus()
    self.do_find_text(self.entry_searchtext)
def view_update_contents(self, *args):
    """Regenerate the table of contents and refresh the navigation pane."""
    self.window.present()
    contents = self.edit.do_view_update_contents()
    self.linkview.updatehtmllinks(contents)
def view_toggle_autonumber(self, *args):
    """Toggle automatic heading numbering and refresh the navigation pane."""
    self.window.present()
    contents = self.edit.do_view_toggle_autonumber()
    self.linkview.updatehtmllinks(contents)
def view_sourceview(self, *args):
    """Toggle between WYSIWYG and HTML-source view.

    The navigation pane is hidden while in source view.  The ordering
    (one action now, its counterpart deferred via gobject.idle_add) is
    deliberate: it avoids visible flicker during the transition.
    """
    self.window.present()
    if not self.edit.get_view_source_mode():
        ## Switch to source view first, then hide the navigation pane.
        self.edit.toggle_html_view()
        gobject.idle_add( self.editbox.navigation_pane.hide )
        pass
    else:
        ## Show the navigation pane first, then switch back to WYSIWYG.
        self.editbox.navigation_pane.show_all()
        gobject.idle_add( self.edit.toggle_html_view )
        pass
    pass
def do_update_images(self, *args):
    """Inline the document's images as base64 data URIs."""
    self.window.present()
    self.edit.do_image_base64()
def do_insertimage(self, *args):
    """Ask for an image file and insert it at the caret."""
    src = gtkdialogs.open(title=_('InsertImage'), name_mimes=[[_("Image Files"), "image/*"]])
    if src:
        self.edit.do_insertimage(src)
def do_createlink(self, *args):
    """Prompt for a URL and turn the selection into a link; the empty
    'http://' placeholder is treated as cancel."""
    link = gtkdialogs.inputbox(title=_('Create Link'), label=_('URL:'), text="")
    if link and link != "http://":
        self.edit.do_createlink(link)
def do_inserthorizontalrule(self, *args):
    """Insert a horizontal rule at the caret."""
    self.window.present()
    self.edit.do_inserthorizontalrule()
def do_insert_table(self, *args):
    """Prompt for table dimensions and insert a table at the caret.

    NOTE(review): the spinboxes are labelled Rows / "Cows:" ("Cows"
    looks like a typo for "Cols"), and the locals `cow,row` are named
    in the opposite order of the labels they receive — verify against
    webkitedit.do_insert_table's parameter order before renaming or
    fixing the label.
    """
    cow,row = gtkdialogs.spinbox2(title=_('Insert Table'),label1=_('Rows:'),value1=3, label2=_('Cows:'),value2=3)
    self.edit.do_insert_table(cow, row)
    pass
def do_insert_html(self, *args):
    """Prompt for raw HTML and insert it at the caret."""
    html = gtkdialogs.textbox(title=_('Insert Html'), text='')
    if html:
        self.edit.do_insert_html(html)
def do_insert_latex_math_equation(self, *args):
    """Prompt for a LaTeX equation, render it and insert the result."""
    latex = gtklatex.latex_dlg()
    if latex:
        rendered = gtklatex.tex2html(latex)
        self.edit.do_insert_html(rendered)
def do_insert_contents(self, *args):
    """Insert a table of contents into the document."""
    self.window.present()
    self.edit.do_insert_contents()
def do_formatblock_p(self, *args):
    """Format the current block as a paragraph and refresh the navigation pane."""
    self.window.present()
    self.linkview.updatehtmllinks(self.edit.do_formatblock_p())
def do_formatblock_h1(self, *args):
    """Format the current block as <h1> and refresh the navigation pane."""
    self.window.present()
    self.linkview.updatehtmllinks(self.edit.do_formatblock_h1())
def do_formatblock_h2(self, *args):
    """Format the current block as <h2> and refresh the navigation pane."""
    self.window.present()
    self.linkview.updatehtmllinks(self.edit.do_formatblock_h2())
def do_formatblock_h3(self, *args):
    """Format the current block as <h3> and refresh the navigation pane."""
    self.window.present()
    self.linkview.updatehtmllinks(self.edit.do_formatblock_h3())
def do_formatblock_h4(self, *args):
    """Format the current block as <h4> and refresh the navigation pane."""
    self.window.present()
    self.linkview.updatehtmllinks(self.edit.do_formatblock_h4())
def do_formatblock_h5(self, *args):
    """Format the current block as <h5> and refresh the navigation pane."""
    self.window.present()
    self.linkview.updatehtmllinks(self.edit.do_formatblock_h5())
def do_formatblock_h6(self, *args):
    """Format the current block as <h6> and refresh the navigation pane."""
    self.window.present()
    self.linkview.updatehtmllinks(self.edit.do_formatblock_h6())
def do_insertunorderedlist(self, *args):
    """Toggle a bulleted list at the caret."""
    self.window.present()
    self.edit.do_insertunorderedlist()
def do_insertorderedlist(self, *args):
    """Toggle a numbered list at the caret."""
    self.window.present()
    self.edit.do_insertorderedlist()
def do_formatblock_div(self, *args):
    """Format the current block as a <div>."""
    self.window.present()
    self.edit.do_formatblock_div()
def do_formatblock_address(self, *args):
    """Format the current block as an <address>."""
    self.window.present()
    self.edit.do_formatblock_address()
def do_formatblock_code(self, *args):
    """Format the current block as code."""
    self.window.present()
    self.edit.do_formatblock_code()
def do_formatblock_blockquote(self, *args):
    """Format the current block as a <blockquote>."""
    self.window.present()
    self.edit.do_formatblock_blockquote()
def do_formatblock_pre(self, *args):
    """Format the current block as preformatted text (<pre>)."""
    self.window.present()
    self.edit.do_formatblock_pre()
def on_bold(self, *args):
    """Toggle bold on the selection."""
    self.window.present()
    self.edit.do_bold()
def do_underline(self, *args):
    """Toggle underline on the selection."""
    self.window.present()
    self.edit.do_underline()
def do_italic(self, *args):
    """Toggle italics on the selection."""
    self.window.present()
    self.edit.do_italic()
def do_strikethrough(self, *args):
    """Toggle strikethrough on the selection."""
    self.window.present()
    self.edit.do_strikethrough()
def do_font_fontname(self, widget, fontname):
    """Apply *fontname* to the selection."""
    self.window.present()
    self.edit.do_font_fontname(fontname)
def do_fontsize_1(self, *args):
    """Set the selection to font size 1 (smallest).

    Bug fix: this previously called ``self.edit.do_fontsize_11()`` —
    no such method exists (the siblings are do_fontsize_2..7), so the
    size-1 action raised AttributeError.
    """
    self.window.present()
    self.edit.do_fontsize_1()
    pass
def do_fontsize_2(self, *args):
    """Set the selection to font size 2."""
    self.window.present()
    self.edit.do_fontsize_2()
def do_fontsize_3(self, *args):
    """Set the selection to font size 3."""
    self.window.present()
    self.edit.do_fontsize_3()
def do_fontsize_4(self, *args):
    """Set the selection to font size 4."""
    self.window.present()
    self.edit.do_fontsize_4()
def do_fontsize_5(self, *args):
    """Set the selection to font size 5."""
    self.window.present()
    self.edit.do_fontsize_5()
def do_fontsize_6(self, *args):
    """Set the selection to font size 6."""
    self.window.present()
    self.edit.do_fontsize_6()
def do_fontsize_7(self, *args):
    """Set the selection to font size 7 (largest)."""
    self.window.present()
    self.edit.do_fontsize_7()
def do_color_forecolor(self, *args):
    """Apply the last-used text color, or open the chooser on first use.

    The instance dict is checked directly (not hasattr) so only a color
    actually picked in this window counts.
    """
    if "forecolor" not in self.__dict__:
        self.on_color_select_forecolor()
    else:
        self.edit.grab_focus()
        self.edit.do_color_forecolor(self.forecolor)
def on_color_select_forecolor(self, *args):
    """Open the color chooser and apply/remember the picked text color."""
    color = gtkdialogs.colorbox()
    if color:
        self.forecolor = color
        self.edit.do_color_forecolor(color)
def do_color_hilitecolor(self, *args):
    """Apply the last-used highlight color, or open the chooser on
    first use (instance dict checked directly, as in do_color_forecolor)."""
    if "hilitecolor" not in self.__dict__:
        self.on_color_select_hilitecolor()
    else:
        self.edit.grab_focus()
        self.edit.do_color_hilitecolor(self.hilitecolor)
def on_color_select_hilitecolor(self, *args):
    """Open the color chooser and apply/remember the highlight color.

    The _on_color_select_hilitecolor flag guards against the duplicated
    click event fired through the gtk.MenuToolButton arrow: re-entrant
    calls while the dialog is open are swallowed (return True).
    """
    if self.__dict__.get('_on_color_select_hilitecolor'):
        return True
    self._on_color_select_hilitecolor = 1
    color = gtkdialogs.colorbox()
    self._on_color_select_hilitecolor = 0
    if color:
        self.hilitecolor = color
        self.edit.do_color_hilitecolor(color)
    return False
def do_removeformat(self, *args):
    """Strip formatting from the selection."""
    self.window.present()
    self.edit.do_removeformat()
def do_justifyleft(self, *args):
    """Left-align the current block."""
    self.window.present()
    self.edit.do_justifyleft()
def do_justifycenter(self, *args):
    """Center the current block."""
    self.window.present()
    self.edit.do_justifycenter()
def do_justifyfull(self, *args):
    """Justify the current block on both margins."""
    self.window.present()
    self.edit.do_justifyfull()
def do_justifyright(self, *args):
    """Right-align the current block."""
    self.window.present()
    self.edit.do_justifyright()
def do_indent(self, *args):
    """Increase the indent of the current block."""
    self.window.present()
    self.edit.do_indent()
def do_outdent(self, *args):
    """Decrease the indent of the current block.

    Unlike its siblings this does not raise the window first
    (preserved as-is from the original).
    """
    self.edit.do_outdent()
def do_subscript(self, *args):
    """Toggle subscript on the selection."""
    self.window.present()
    self.edit.do_subscript()
def do_superscript(self, *args):
    """Toggle superscript on the selection."""
    self.window.present()
    self.edit.do_superscript()
def on_about(self, *args):
    """Show the About dialog (modal; destroyed when dismissed)."""
    authors = [
        "Jiahua Huang <jhuangjiahua(at)gmail.com>",
        "Aron Xu <happyaron.xu(at)gmail.com>",
    ]
    # gobject.new lets all properties be set in one construction call.
    about = gobject.new(gtk.AboutDialog,
                        name=_("GWrite"),
                        program_name=_("GWrite"),
                        logo_icon_name="gwrite",
                        version=__version__,
                        copyright=_("Copyright (C) 2009-2010 Jiahua Huang, Aron Xu"),
                        comments=_("Simple GTK+ HTML5 Rich Text Editor"),
                        license="LGPLv3+",
                        website="http://gwrite.googlecode.com/",
                        website_label="gwrite.googlecode.com",
                        authors=authors)
    about.run()
    about.destroy()
    pass
def hide_findbar(self, *args):
    """Hide the find/replace bar."""
    self.findbar.hide()
def do_highlight_text_matches(self, *args):
    """Live-highlight matches of the search entry's text.

    With text: re-mark all matches and show the count in the entry's
    tooltip.  Without text: clear highlighting and restore the tooltip.
    """
    text = self.entry_searchtext.get_text()
    self.edit.unmark_text_matches()
    if text:
        matches = self.edit.mark_text_matches(text, 0, 0)
        self.edit.set_highlight_text_matches(1)
        self.entry_searchtext.set_tooltip_markup(_("%s matches") % matches)
    else:
        self.edit.set_highlight_text_matches(0)
        self.entry_searchtext.set_tooltip_text(_("Search text"))
def do_find_text_backward(self, *args):
    """Search backwards for the text in the search entry; no-op when empty."""
    text = self.entry_searchtext.get_text()
    if text:
        self.edit.do_find_text_backward(text)
def do_find_text(self, *args):
    """Search forward for the text in the search entry; no-op when empty.

    NOTE(review): an earlier variant treated a click on the leading icon
    of the entry as "search backward"; that code path is disabled.
    """
    text = self.entry_searchtext.get_text()
    if not text:
        return
    self.edit.do_find_text(text)
def do_replace_text(self, *args):
    """Replace one occurrence of the search text with the replacement text."""
    needle = self.entry_searchtext.get_text()
    replacement = self.entry_replace_text.get_text()
    if needle:
        self.edit.do_replace_text(needle, replacement)
def do_replace_text_all(self, *args):
    """Replace every occurrence of the search text with the replacement text."""
    needle = self.entry_searchtext.get_text()
    replacement = self.entry_replace_text.get_text()
    if needle:
        self.edit.do_replace_text_all(needle, replacement)
def get_custom_widget(self, id, string1, string2, int1, int2):
    """Glade fallback hook: return a placeholder label for an unknown custom widget."""
    return gtk.Label(_("(custom widget: %s)") % id)
# Command-line help text, printed for -h/--help and on option-parse errors.
usage = _('''GWrite
Usage:
gwrite [OPTION...] [FILE...] - Edit html files
Options:
-h, --help Show help options
-v, --version Show version information
''')
def openedit(filename=""):
    """Open *filename* in the primary MainWindow.

    Thin wrapper meant for gobject.idle_add / gobject.timeout_add: it
    returns False so the scheduled callback runs exactly once.
    """
    Windows[0].open(filename)
    return False
def _listen(s):
    """Accept connections on the control unix socket *s* forever.

    Each connection delivers newline-separated file names from another
    invocation of the program; every name is scheduled to be opened on
    the GTK main loop via openedit().
    """
    while True:
        conn, addr = s.accept()
        payload = conn.recv(102400)
        for name in payload.split('\n'):
            gobject.idle_add(openedit, name)
def main():
    '''Command-line entry point.

    Parses options, then either hands the file list to an already
    running instance (single-instance mode, over a unix control socket)
    or opens the files itself and runs the GTK main loop.
    '''
    import os, sys
    import socket
    # command-line option parsing
    import getopt
    config.load()
    gtk.gdk.threads_init()
    try:
        opts, args = getopt.getopt(sys.argv[1:], 'vh', ['version', 'help'])
        pass
    except:
        # unrecognised option: print usage and quit
        print usage
        return
    for o, v in opts:
        if o in ('-h', '--help'):
            print usage
            return
        elif o in ('-v', '--version'):
            print __version__
            return
        pass
    # files to open, as absolute paths
    editfiles = [ os.path.abspath(i) for i in args ]
    # single-instance mode: delegate to a running GWrite if there is one
    if config.single_instance_mode:
        # profile dir and control-socket path from the config module
        profdir = config.profdir
        ctlfile = config.ctlfile
        try:
            # another GWrite is listening: send it the file list and exit
            s = socket.socket(socket.AF_UNIX)
            s.connect(ctlfile)
            s.send('\n'.join(editfiles))
            return
        except:
            # no running instance; this process becomes the primary one
            pass
        # listen on the control socket for later invocations
        s = socket.socket(socket.AF_UNIX)
        if os.access(ctlfile, os.R_OK): os.remove(ctlfile)
        s.bind(ctlfile)
        s.listen(1)
        thread.start_new_thread(_listen, (s,))
        pass
    # open the files: first one in the initial window, rest via open()
    edit = MainWindow( editfiles[0:] and editfiles[0] or '' )
    for i in editfiles[1:]:
        i = os.path.abspath(i)
        edit.open(i)
        pass
    # if the default icon theme is 'hicolor', switch to 'Tango'
    # (presumably for better icon coverage -- TODO confirm)
    settings = gtk.settings_get_default( )
    if settings.get_property( 'gtk-icon-theme-name' ) == 'hicolor':
        settings.set_property( 'gtk-icon-theme-name', 'Tango')
        pass
    # add the bundled icons directory to the icon search path
    icon_theme = gtk.icon_theme_get_default()
    icon_dir = os.path.dirname(__file__) + '/icons'
    icon_theme.append_search_path(icon_dir)
    gtk.gdk.threads_enter()
    gtk.main()
    gtk.gdk.threads_leave()
# Script entry point.
if __name__ == '__main__':
    main()
| [
[
8,
0,
0.0018,
0.0016,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
14,
0,
0.0032,
0.0004,
0,
0.66,
0.0455,
162,
1,
0,
0,
0,
0,
3,
0
],
[
1,
0,
0.004,
0.0004,
0,
0.66,
... | [
"'''GWrite\n@author: U{Jiahua Huang <jhuangjiahua@gmail.com>}\n@license: LGPLv3+\n'''",
"__version__ = '0.5.1'",
"import gtk, gobject",
"import gtkdialogs",
"import gtklatex",
"import config",
"import os, sys",
"import thread",
"import re",
"import urllib2",
"try: import i18n\nexcept: from gette... |
#!/usr/bin/python
# -*- coding: UTF-8 -*-
# vim:set shiftwidth=4 tabstop=4 expandtab textwidth=79:
# Author: Huang Jiahua <jhuangjiahua@gmail.com>
# License: LGPLv3+
# Last modified:
# i18n bootstrap: installs the _() translation function application-wide.
app = 'gwrite'
import os, sys
import gettext
# Prefer the in-tree message catalog (../build/mo next to the script) when
# running from a source checkout; otherwise fall back to the system locale
# directories.  unicode=True is the Python 2 gettext API.
if os.path.isdir(os.path.dirname(sys.argv[0]) + '/../build/mo'):
    gettext.install(app, os.path.dirname(sys.argv[0]) + '/../build/mo', unicode=True)
else:
    gettext.install(app, unicode=True)
if __name__=="__main__":
    print _('')
| [
[
14,
0,
0.3636,
0.0455,
0,
0.66,
0,
494,
1,
0,
0,
0,
0,
3,
0
],
[
1,
0,
0.4545,
0.0455,
0,
0.66,
0.25,
688,
0,
2,
0,
0,
688,
0,
0
],
[
1,
0,
0.5,
0.0455,
0,
0.66,
... | [
"app = 'gwrite'",
"import os, sys",
"import gettext",
"if os.path.isdir(os.path.dirname(sys.argv[0]) + '/../build/mo'):\n gettext.install(app, os.path.dirname(sys.argv[0]) + '/../build/mo', unicode=True)\nelse:\n gettext.install(app, unicode=True)",
" gettext.install(app, os.path.dirname(sys.argv[0... |
#!/usr/bin/python
# -*- coding: UTF-8 -*-
'''config
@author: U{Jiahua Huang <jhuangjiahua@gmail.com>}
@license: LGPLv3+
'''
import gtk, gobject
import os, sys
try: import cPickle as pickle
except: import pickle
try: import i18n
except: from gettext import gettext as _
# Default run-mode settings; load() overwrites these from the saved profile.
single_instance_mode = 0  # nonzero: reuse a running instance via a unix socket
mdi_mode = 1  # nonzero: tabbed (MDI) interface
def getconf():
    '''Snapshot the module-level settings into a config dict.

    Collects every non-dunder module global holding a plain data value,
    then adds the per-user profile paths under the keys 'profdir',
    'ctlfile', 'prof' and 'user_stylesheet_file'.  Creates the profile
    directory if it does not exist yet.
    '''
    profdir = os.environ['HOME'] + '/.config/GWrite'
    if not os.path.isdir(profdir): os.makedirs(profdir)
    # one control socket per X display so instances on different
    # displays do not collide
    ctlfile = profdir + '/gwrite.ctl' + os.environ['DISPLAY']
    prof = profdir + '/gwrite.conf'
    user_stylesheet_file = profdir + '/user_stylesheet_uri.css'
    conf = {}
    for name, value in globals().items():
        if name.startswith('__'):
            continue
        # keep only plain data values; the `or` chain is evaluated
        # lazily, so the Python 2 `long` lookup only happens when the
        # earlier checks fail
        if (isinstance(value, str)
                or isinstance(value, int)
                or isinstance(value, long)
                or isinstance(value, float)
                or isinstance(value, dict)
                or isinstance(value, list)
                or isinstance(value, bool)):
            conf[name] = value
    conf['profdir'] = profdir
    conf['ctlfile'] = ctlfile
    conf['prof'] = prof
    conf['user_stylesheet_file'] = user_stylesheet_file
    return conf
def load():
    '''Load persisted settings over the defaults and publish them.

    Reads the pickled profile file if present (any failure is ignored:
    best-effort), merges it over getconf()'s snapshot, pushes the result
    into this module's globals, and returns the merged dict.
    '''
    conf = getconf()
    try:
        conf.update(pickle.loads(file(conf['prof']).read()))
    except:
        pass
    globals().update(conf)
    return conf
def write():
    '''Persist the current settings to the profile file and return them.'''
    conf = getconf()
    file(conf['prof'], 'w').write(pickle.dumps(conf))
    return conf
def show_preference_dlg(title=_("Preferences"), parent=None, *args):
    '''Show the Preferences dialog.

    Builds a gtk.Dialog with a single "Run mode" notebook page holding
    the MDI-mode and single-instance-mode checkboxes, pre-set from the
    current config.  Returns {} if the user cancels; otherwise publishes
    the updated config into this module's globals and returns it.
    NOTE: nothing is written to disk here -- callers persist via write().
    '''
    dlg = gtk.Dialog(title, parent, gtk.DIALOG_DESTROY_WITH_PARENT,
              (gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
              gtk.STOCK_OK, gtk.RESPONSE_OK ))
    dlg.set_default_size(200, 300)
    # current settings snapshot
    config = getconf()
    # notebook with one "Run mode" page
    notebook1 = gtk.Notebook()
    notebook1.set_tab_pos(gtk.POS_TOP)
    notebook1.set_scrollable(False)
    notebook1.show()
    vbox1 = gtk.VBox(False, 0)
    vbox1.show()
    vbox1.set_spacing(0)
    # MDI (tabbed interface) checkbox
    checkbutton_mdi_mode = gtk.CheckButton()
    checkbutton_mdi_mode.set_active(False)
    checkbutton_mdi_mode.set_label(_("Use Tabs MDI interface"))
    checkbutton_mdi_mode.set_tooltip_text(_("Supports editing multiple files in one window (known sometimes as tabs or MDI)"))
    checkbutton_mdi_mode.show()
    checkbutton_mdi_mode.set_border_width(10)
    checkbutton_mdi_mode.set_relief(gtk.RELIEF_NORMAL)
    vbox1.pack_start(checkbutton_mdi_mode, False, False, 0)
    # single-instance checkbox
    checkbutton_single_instance_mode = gtk.CheckButton()
    checkbutton_single_instance_mode.set_active(False)
    checkbutton_single_instance_mode.set_label(_("Single Instance mode"))
    checkbutton_single_instance_mode.set_tooltip_text(_("Only one instance of the application will be running at a time."))
    checkbutton_single_instance_mode.show()
    checkbutton_single_instance_mode.set_border_width(10)
    checkbutton_single_instance_mode.set_relief(gtk.RELIEF_NORMAL)
    vbox1.pack_start(checkbutton_single_instance_mode, False, False, 0)
    hseparator1 = gtk.HSeparator()
    hseparator1.show()
    vbox1.pack_start(hseparator1, False, False, 0)
    # restart notice
    label2 = gtk.Label(_("You need to restart gwrite for some options to take effect."))
    label2.set_alignment(0, 0)
    label2.set_angle(0)
    label2.set_padding(20, 20)
    label2.set_line_wrap(True)
    label2.set_width_chars(30)
    label2.show()
    vbox1.pack_start(label2)
    # page tab label
    label1 = gtk.Label(_("Run mode"))
    label1.set_angle(0)
    label1.set_padding(0, 0)
    label1.set_line_wrap(False)
    label1.show()
    notebook1.append_page(vbox1, label1)
    # initialise checkboxes from the current config
    checkbutton_mdi_mode.set_active(config.get("mdi_mode", 0))
    checkbutton_single_instance_mode.set_active(config.get("single_instance_mode", 0))
    dlg.vbox.pack_start(notebook1, True, True, 0)
    resp = dlg.run()
    # read the (possibly changed) checkbox states back into the config
    config['mdi_mode'] = checkbutton_mdi_mode.get_active()
    config['single_instance_mode'] = checkbutton_single_instance_mode.get_active()
    dlg.destroy()
    if resp == gtk.RESPONSE_CANCEL:
        return {}
    globals().update(config)
    return config
# Manual test: load settings, show the dialog, persist the result.
if __name__=="__main__":
    load()
    print show_preference_dlg()
    write()
| [
[
8,
0,
0.0306,
0.0272,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.0544,
0.0068,
0,
0.66,
0.0909,
166,
0,
2,
0,
0,
166,
0,
0
],
[
1,
0,
0.0612,
0.0068,
0,
0.66... | [
"'''config\n@author: U{Jiahua Huang <jhuangjiahua@gmail.com>}\n@license: LGPLv3+\n'''",
"import gtk, gobject",
"import os, sys",
"try: import cPickle as pickle\nexcept: import pickle",
"try: import cPickle as pickle",
"except: import pickle",
"try: import i18n\nexcept: from gettext import gettext as _",... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.